From 6ab5756e3a00040b583fca7b57b8716f30e18f40 Mon Sep 17 00:00:00 2001 From: Wenqi Qiu Date: Tue, 14 Jan 2025 23:05:22 +0800 Subject: [PATCH 1/4] Add e2e test case for SecondaryNetwork of SR-IOV type Signed-off-by: Wenqi Qiu --- ci/jenkins/jobs/job-templates.yaml | 25 + ci/jenkins/jobs/projects-cloud.yaml | 146 +++++ ci/test-sriov-secondary-network-aws.sh | 582 ++++++++++++++++++ go.mod | 39 +- go.sum | 82 ++- .../s3uploader/s3uploader_test.go | 2 +- test/e2e-secondary-network/aws/ec2.go | 43 ++ .../sriov-network-attachment-definition.yml | 16 + .../infra/sriov-secondary-networks.yml | 16 + .../secondary_network_test.go | 129 +++- test/e2e/framework.go | 8 +- test/e2e/supportbundle_test.go | 2 +- 12 files changed, 990 insertions(+), 100 deletions(-) create mode 100755 ci/test-sriov-secondary-network-aws.sh create mode 100644 test/e2e-secondary-network/aws/ec2.go create mode 100644 test/e2e-secondary-network/infra/sriov-network-attachment-definition.yml create mode 100644 test/e2e-secondary-network/infra/sriov-secondary-networks.yml diff --git a/ci/jenkins/jobs/job-templates.yaml b/ci/jenkins/jobs/job-templates.yaml index 10b5829bf6e..4ddef22cb7b 100644 --- a/ci/jenkins/jobs/job-templates.yaml +++ b/ci/jenkins/jobs/job-templates.yaml @@ -425,3 +425,28 @@ started-status: '{started_status}' wrappers: '{wrappers}' +- job-template: + name: 'cloud-{name}-{test_name}-aws-cleanup' + node: '{node}' + triggers: '{triggers}' + builders: '{builders}' + parameters: '{parameters}' + description: '{description}' + block-downstream: false + block-upstream: false + project-type: freestyle + properties: + - build-discarder: + artifact-days-to-keep: -1 + artifact-num-to-keep: -1 + days-to-keep: 30 + num-to-keep: 30 + - github: + url: '{repo_url}' + scm: + - git: + branches: '{branches}' + name: origin + url: '{repo_url}' + publishers: '{publishers}' + wrappers: '{wrappers}' diff --git a/ci/jenkins/jobs/projects-cloud.yaml b/ci/jenkins/jobs/projects-cloud.yaml index c54b7f49469..d0ad49c49a1 100644 --- a/ci/jenkins/jobs/projects-cloud.yaml +++ b/ci/jenkins/jobs/projects-cloud.yaml @@ -1608,3 +1608,149 @@ default-excludes: true fingerprint: false only-if-success: false + - 'cloud-{name}-{test_name}-aws-cleanup': + test_name: sriov-secondary-network + description: This is for deleting AWS resources for SRIOV secondary-network test. + parameters: + - string: + default: https://github.com/antrea-io/antrea/ + description: The repository to checkout Antrea for this test. + name: ANTREA_REPO + trim: 'true' + - string: + default: main + description: The branch or SHA commit ID to checkout and build Antrea for this test. + name: ANTREA_GIT_REVISION + trim: 'true' + - string: + default: '' + description: The ec2 instance id of control-plane Node. + name: CONTROLPLANE_INSTANCE_ID + trim: 'true' + - string: + default: '' + description: The ec2 instance id of worker Node. + name: WORKER_INSTANCE_ID + trim: 'true' + - string: + default: '' + description: The ec2 secondary network-interface id of control-plane Node. + name: CONTROLPLANE_NODE_ENI + trim: 'true' + - string: + default: '' + description: The ec2 secondary network-interface id of worker Node. + name: WORKER_NODE_ENI + trim: 'true' + - string: + default: '' + description: The subnet CIDR reservation id created in the subnet during test. 
+ name: SUBNET_CIDR_RESERVATION_ID + trim: 'true' + builders: + - shell: |- + #!/bin/bash + set -ex + export CONTROLPLANE_INSTANCE_ID=${{CONTROLPLANE_INSTANCE_ID}} + export WORKER_INSTANCE_ID=${{WORKER_INSTANCE_ID}} + export CONTROLPLANE_NODE_ENI=${{CONTROLPLANE_NODE_ENI}} + export WORKER_NODE_ENI=${{WORKER_NODE_ENI}} + export SUBNET_CIDR_RES_ID=${{SUBNET_CIDR_RESERVATION_ID}} + source ./ci/test-sriov-secondary-network-aws.sh --cleanup-only \ + --aws-access-key ${{AWS_ACCESS_KEY}} --aws-secret-key ${{AWS_SECRET_KEY}} --aws-service-user-role-arn ${{AWS_SERVICE_USER_ROLE_ARN}} --aws-service-user ${{AWS_SERVICE_USER_NAME}} \ + --CONTROLPLANE_INSTANCE_ID ${{CONTROLPLANE_INSTANCE_ID}} + concurrent: false + disabled: false + node: antrea-cloud + branches: + - '${{ANTREA_GIT_REVISION}}' + repo_url: '${{ANTREA_REPO}}' + publishers: + - email: + notify-every-unstable-build: true + recipients: projectantrea-dev@googlegroups.com rahulj@vmware.com + triggers: [] + wrappers: + - credentials-binding: + - text: + credential-id: AWS_ACCESS_KEY # Jenkins secret that stores aws access key + variable: AWS_ACCESS_KEY + - text: + credential-id: AWS_SECRET_KEY # Jenkins secret that stores aws secret key + variable: AWS_SECRET_KEY + - text: + credential-id: AWS_SERVICE_USER_ROLE_ARN + variable: AWS_SERVICE_USER_ROLE_ARN + - text: + credential-id: AWS_SERVICE_USER_NAME + variable: AWS_SERVICE_USER_NAME + - '{name}-{test_name}-for-pull-request': + disabled: false + test_name: sriov-secondary-network-e2e + node: antrea-cloud-test + description: 'Test SR-IOV secondary network in AWS for antrea.' + branches: + - ${{sha1}} + trigger_phrase: '^(?!Thanks for your PR).*/test-sriov-secondary-network-e2e.*' + white_list_target_branches: [] + allow_whitelist_orgs_as_admins: true + admin_list: '{antrea_admin_list}' + org_list: '{antrea_org_list}' + white_list: '{antrea_white_list}' + only_trigger_phrase: true + trigger_permit_all: false + status_context: sriov-secondary-network-e2e + status_url: null + success_status: Build finished. + failure_status: Failed. Add comment /test-sriov-secondary-network-e2e to re-trigger. + error_status: Failed. Add comment /test-sriov-secondary-network-e2e to re-trigger. 
+ triggered_status: null + started_status: null + builders: + - shell: |- + #!/bin/bash + cat ${{AWS_EC2_SSH_KEY}} > ./${{AWS_EC2_SSH_KEY_NAME}} + chmod 400 ${{AWS_EC2_SSH_KEY_NAME}} + DOCKER_REGISTRY="$(head -n1 ci/docker-registry)" + [ "$DOCKER_REGISTRY" != "docker.io" ] || ./ci/jenkins/docker_login.sh --docker-user ${{DOCKER_USERNAME}} --docker-password ${{DOCKER_PASSWORD}} + sudo ./ci/test-sriov-secondary-network-aws.sh --aws-access-key ${{AWS_ACCESS_KEY}} --aws-secret-key ${{AWS_SECRET_KEY}} \ + --aws-security-group-id ${{AWS_SECURITY_GROUP}} --aws-subnet-id ${{AWS_SUBNET_ID}} \ + --aws-ec2-ssh-key-name ${{AWS_EC2_SSH_KEY_NAME}} --aws-service-user-role-arn ${{AWS_SERVICE_USER_ROLE_ARN}} --aws-service-user ${{AWS_SERVICE_USER_NAME}} + publishers: + - email: + notify-every-unstable-build: true + recipients: projectantrea-dev@googlegroups.com + wrappers: + - credentials-binding: + - text: + credential-id: AWS_ACCESS_KEY # Jenkins secret that stores aws access key + variable: AWS_ACCESS_KEY + - text: + credential-id: AWS_SECRET_KEY # Jenkins secret that stores aws secret key + variable: AWS_SECRET_KEY + - text: + credential-id: DOCKER_USERNAME + variable: DOCKER_USERNAME + - text: + credential-id: DOCKER_PASSWORD + variable: DOCKER_PASSWORD + - text: + credential-id: AWS_EC2_SSH_KEY_NAME + variable: AWS_EC2_SSH_KEY_NAME + - text: + credential-id: AWS_SECURITY_GROUP + variable: AWS_SECURITY_GROUP + - text: + credential-id: AWS_SUBNET_ID + variable: AWS_SUBNET_ID + - text: + credential-id: AWS_SERVICE_USER_ROLE_ARN + variable: AWS_SERVICE_USER_ROLE_ARN + - text: + credential-id: AWS_SERVICE_USER_NAME + variable: AWS_SERVICE_USER_NAME + - ssh-user-private-key: + credential-id: AWS_EC2_SSH_KEY + key-file-variable: AWS_EC2_SSH_KEY + username-variable: AWS_EC2_SSH_USER_NAME + passphrase-variable: AWS_EC2_SSH_PASSPHRASE diff --git a/ci/test-sriov-secondary-network-aws.sh b/ci/test-sriov-secondary-network-aws.sh new file mode 100755 index 00000000000..2699ef6531c --- /dev/null +++ b/ci/test-sriov-secondary-network-aws.sh @@ -0,0 +1,582 @@ +#!/bin/bash +# Copyright 2025 Antrea Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -exu + +function echoerr { + >&2 echo "$@" +} + +TIMEOUT="10m" +K8S_VERSION="v1.32" +# Set AWS related variables +REGION="us-west-2" # AWS region +INSTANCE_TYPE="c3.large" # EC2 instance type +AMI_ID="ami-05d38da78ce859165" # AMI ID for Ubuntu 24.04 + +RUN_ALL=true +RUN_SETUP_ONLY=false +RUN_CLEANUP_ONLY=false + +SUBNET_CIDR_RES_ID="${SUBNET_CIDR_RES_ID:-}" + +_usage="Usage: $0 [--aws-access-key ] [--aws-secret-key ] \ + [--aws-security-group-id ] [--aws-subnet-id ] \ + [--aws-ec2-ssh-key-name ] + [--aws-service-user-role-arn ] [--aws-service-user ] \ + [--aws-region ] [--k8s-version ] + +Setup a Kubernetes cluster and test SR-IOV secondary network in AWS. + + --aws-access-key AWS Acess Key for logging in to awscli. + --aws-secret-key AWS Secret Key for logging in to awscli. + --aws-security-group-id Security group for the ec2 instance in the Kubernetes cluster. 
+ --aws-subnet-id The subnet in which the ec2 instance network interface is located. + --aws-ec2-ssh-key-name The key name to be used for ssh access to ec2 instances. + --aws-service-user-role-arn AWS Service User Role ARN for logging in to awscli. + --aws-service-user AWS Service User Name for logging in to awscli. + --aws-region The AWS region where the cluster will be initiated. Defaults to $REGION. + --setup-only Only perform setting up the cluster and run test. + --cleanup-only Only perform cleaning up the cluster. + --k8s-version The K8s cluster version. Defaults to $K8S_VERSION." + +function print_usage { + echoerr "$_usage" +} + +function print_help { + echoerr "Try '$0 --help' for more information." +} + +while [[ $# -gt 0 ]] +do +key="$1" + +case $key in + --aws-access-key) + AWS_ACCESS_KEY="$2" + shift 2 + ;; + --aws-secret-key) + AWS_SECRET_KEY="$2" + shift 2 + ;; + --aws-security-group-id) + AWS_SECURITY_GROUP="$2" + shift 2 + ;; + --aws-subnet-id) + AWS_SUBNET_ID="$2" + shift 2 + ;; + --aws-ec2-ssh-key-name) + AWS_EC2_SSH_KEY_NAME="$2" + shift 2 + ;; + --aws-service-user-role-arn) + AWS_SERVICE_USER_ROLE_ARN="$2" + shift 2 + ;; + --aws-service-user) + AWS_SERVICE_USER_NAME="$2" + shift 2 + ;; + --aws-region) + REGION="$2" + shift 2 + ;; + --k8s-version) + K8S_VERSION="$2" + shift 2 + ;; + --setup-only) + RUN_SETUP_ONLY=true + RUN_ALL=false + shift + ;; + --cleanup-only) + RUN_CLEANUP_ONLY=true + RUN_ALL=false + shift + ;; + -h|--help) + print_usage + exit 0 + ;; + *) # unknown option + echoerr "Unknown option $1" + exit 1 + ;; +esac +done + +mkdir -p ~/.aws +cat > ~/.aws/config < ~/.aws/credentials </dev/null 2>&1 && pwd)" +ANTREA_CHART="$THIS_DIR/../build/charts/antrea" +ANTREA_TAR="antrea-ubuntu.tar" +DOCKER_IMAGE_PATH="$THIS_DIR/../$ANTREA_TAR" +SRIOV_SECONDARY_NETWORKS_YAML="$THIS_DIR/../test/e2e-secondary-network/infra/sriov-secondary-networks.yml" +IP_POOL_YAML="pool1.yaml" +NAD_YAML="$THIS_DIR/../test/e2e-secondary-network/infra/sriov-network-attachment-definition.yml" + +CONTROLPLANE_IP="" +WORKER_IP="" + +CONTROLPLANE_INSTANCE_ID="${CONTROLPLANE_INSTANCE_ID:-}" +WORKER_INSTANCE_ID="${WORKER_INSTANCE_ID:-}" + +CONTROLPLANE_NODE_ENI="${CONTROLPLANE_NODE_ENI:-}" +WORKER_NODE_ENI="${WORKER_NODE_ENI:-}" + +# Function to launch EC2 instance +function launch_ec2_instance() { + local instance_name=$1 + instance_id=$(aws ec2 run-instances \ + --image-id $AMI_ID \ + --count 1 \ + --instance-type $INSTANCE_TYPE \ + --key-name "$AWS_EC2_SSH_KEY_NAME" \ + --security-group-ids "$AWS_SECURITY_GROUP" \ + --subnet-id "$AWS_SUBNET_ID" \ + --block-device-mappings DeviceName=/dev/sda1,Ebs={VolumeSize=20} \ + --query "Instances[0].InstanceId" \ + --output text) + echo "$instance_id" +} + +function attach_network_interface() { + local instance_id=$1 + local node_type=$2 + ENI_ID=$(aws ec2 create-network-interface \ + --subnet-id "$AWS_SUBNET_ID" \ + --groups "$AWS_SECURITY_GROUP" \ + --query 'NetworkInterface.NetworkInterfaceId' \ + --output text) + + echo "Network interface created successfully with ENI ID: $ENI_ID" + + # Attach the ENI to the EC2 instance + echo "Attaching network interface $ENI_ID to instance $instance_id ..." 
+ ATTACHMENT_ID=$(aws ec2 attach-network-interface \ + --network-interface-id "$ENI_ID" \ + --instance-id "$instance_id" \ + --device-index 1 \ + --query 'AttachmentId' \ + --output text) + + echo "Network interface attached successfully with Attachment ID: $ATTACHMENT_ID" + + if [[ "$node_type" == "control-plane" ]]; then + CONTROLPLANE_NODE_ENI="$ENI_ID" + echo "Assigned ENI ID $ENI_ID to CONTROLPLANE_NODE_ENI" + elif [[ "$node_type" == "worker" ]]; then + WORKER_NODE_ENI="$ENI_ID" + echo "Assigned ENI ID $ENI_ID to WORKER_NODE_ENI." + else + echo "Invalid node type. Please specify 'control-plane' or 'worker'." + exit 1 + fi + + echo "Verifying the attachment..." + aws ec2 describe-instances \ + --instance-id "$instance_id" \ + --query "Reservations[0].Instances[0].NetworkInterfaces" \ + --output text +} + +# Function to get the public IP of an EC2 instance +function get_instance_ip() { + local instance_id=$1 + ip=$(aws ec2 describe-instances \ + --instance-ids "$instance_id" \ + --query "Reservations[0].Instances[0].PublicIpAddress" \ + --output text) + echo "$ip" +} + +# Function to install Kubernetes on a node +function install_kubernetes() { + local node_ip=$1 + retry_count=20 + until ssh -o StrictHostKeyChecking=no -o ConnectTimeout=10 -i "$AWS_EC2_SSH_KEY_NAME" ubuntu@"$node_ip" exit; do + echo "SSH connection failed. Retrying in 10 seconds..." + sleep 10 + retry_count=$((retry_count-1)) + if [ $retry_count -le 0 ]; then + echo "Max retries reached. Exiting." + exit 1 + fi + done + echo "Installing Kubernetes on node $node_ip..." + ssh -o StrictHostKeyChecking=no -i "$AWS_EC2_SSH_KEY_NAME" ubuntu@"$node_ip" << EOF + sudo apt update && sudo apt upgrade -y + sudo apt install -y docker.io + sudo docker --version + + sudo apt-get update + sudo apt-get install apt-transport-https ca-certificates curl gpg + + sudo install -m 0755 -d /etc/apt/keyrings + sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc + sudo chmod a+r /etc/apt/keyrings/docker.asc + echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + sudo apt-get update + sudo apt-get install containerd.io -y + sudo mkdir -p /etc/containerd + sudo containerd config default | sudo tee /etc/containerd/config.toml + sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml + + sudo systemctl restart containerd + sudo systemctl enable containerd + + echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/$K8S_VERSION/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list + curl -fsSL https://pkgs.k8s.io/core:/stable:/$K8S_VERSION/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg + sudo apt update + sudo apt install -y kubelet kubeadm kubectl + sudo apt-mark hold kubelet kubeadm kubectl + sudo swapoff -a + sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab +EOF +} + +# Function to initialize the Kubernetes control-plane node +function initialize_control_plane_node() { + local control_plane_node_ip=$1 + echo "Initializing Kubernetes control-plane node on $control_plane_node_ip..." 
+ + ssh -o StrictHostKeyChecking=no -i "$AWS_EC2_SSH_KEY_NAME" ubuntu@"$control_plane_node_ip" << EOF + sudo kubeadm init --pod-network-cidr=10.244.0.0/16 + mkdir -p \$HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf \$HOME/.kube/config + sudo chown \$(id -u):\$(id -g) \$HOME/.kube/config +EOF +} + +# Function to get the join command for the worker node +function get_join_command() { + local control_plane_node_ip=$1 + join_command=$(ssh -o StrictHostKeyChecking=no -i "$AWS_EC2_SSH_KEY_NAME" ubuntu@"$control_plane_node_ip" "sudo kubeadm token create --print-join-command") + echo "$join_command" +} + +# Function to join the worker node to the cluster +function join_worker() { + local worker_ip=$1 + local join_command=$2 + echo "Joining worker node with IP $worker_ip to the Kubernetes cluster..." + + ssh -o StrictHostKeyChecking=no -i "$AWS_EC2_SSH_KEY_NAME" ubuntu@$worker_ip << EOF + sudo $join_command +EOF +} + +# Function to verify the Kubernetes cluster status +function verify_cluster() { + local control_plane_node_ip=$1 + echo "Verifying Kubernetes cluster status on control-plane node $control_plane_node_ip..." + + ssh -o StrictHostKeyChecking=no -i "$AWS_EC2_SSH_KEY_NAME" ubuntu@$control_plane_node_ip << EOF + kubectl get nodes +EOF +} + +function setup_cluster { + # Launch Master and Worker EC2 instances + echo "Launching EC2 instance ..." + CONTROLPLANE_INSTANCE_ID=$(launch_ec2_instance "ControlPlane") + echo "ControlPlane EC2 instance launched with Instance ID: $CONTROLPLANE_INSTANCE_ID" + WORKER_INSTANCE_ID=$(launch_ec2_instance "Worker") + echo "Worker EC2 instance launched with Instance ID: $WORKER_INSTANCE_ID" + + # Wait for EC2 instances to be fully running + echo "Waiting for EC2 instances to be running..." + aws ec2 wait instance-running --instance-ids $CONTROLPLANE_INSTANCE_ID $WORKER_INSTANCE_ID + + attach_network_interface "$CONTROLPLANE_INSTANCE_ID" "control-plane" + attach_network_interface "$WORKER_INSTANCE_ID" "worker" + + echo "======== CONTROLPLANE_INSTANCE_ID: $CONTROLPLANE_INSTANCE_ID, WORKER_INSTANCE_ID: $WORKER_INSTANCE_ID =========" + + # Get the public IP addresses of the instances + CONTROLPLANE_IP=$(get_instance_ip "$CONTROLPLANE_INSTANCE_ID") + echo "Get PublicIpAddress of EC2 Instance $CONTROLPLANE_INSTANCE_ID: $CONTROLPLANE_IP" + WORKER_IP=$(get_instance_ip "$WORKER_INSTANCE_ID") + echo "Get PublicIpAddress of EC2 Instance $WORKER_INSTANCE_ID: $WORKER_IP" + + echo "====== CONTROLPLANE_IP: $CONTROLPLANE_IP, WORKER_IP: $WORKER_IP ======" + + # Install Kubernetes on both control-plane and worker nodes + install_kubernetes "$CONTROLPLANE_IP" + install_kubernetes "$WORKER_IP" + + # Initialize Kubernetes control-plane Node, if it fails, it will not exit + initialize_control_plane_node "$CONTROLPLANE_IP" || { + echo "Failed to initialize control-plane Node. Re-running installation for both nodes." + # Re-run the install commands for both control-plane and worker nodes + install_kubernetes "$CONTROLPLANE_IP" + install_kubernetes "$WORKER_IP" + # After re-installing, attempt the initialization again + initialize_control_plane_node "$CONTROLPLANE_IP" || { + echo "Initialization ControlPlane failed again, please check logs." + exit 1 + } + } + + # Get the join command and join the worker node to the cluster + JOIN_COMMAND=$(get_join_command "$CONTROLPLANE_IP") + join_worker "$WORKER_IP" "$JOIN_COMMAND" + + # Verify the Kubernetes cluster status + verify_cluster "$CONTROLPLANE_IP" + + echo "Kubernetes cluster setup completed!" 
+} + +function build_image() { + chmod -R g-w build/images/ovs + chmod -R g-w build/images/base + ./hack/build-antrea-linux-all.sh --pull + docker save antrea/antrea-agent-ubuntu:latest antrea/antrea-controller-ubuntu:latest -o $ANTREA_TAR +} + +# Function to upload Docker image and load it +function upload_and_load_image() { + local node_ip=$1 + local image_path=$2 + echo "Uploading Docker image $image_path to node $node_ip..." + + # Copy the Docker image tarball to the node + scp -o StrictHostKeyChecking=no -i "$AWS_EC2_SSH_KEY_NAME" "$image_path" ubuntu@"$node_ip":/home/ubuntu/ + + # SSH into the node and load the image + ssh -o StrictHostKeyChecking=no -i "$AWS_EC2_SSH_KEY_NAME" ubuntu@"$node_ip" << EOF + sudo ctr images import /home/ubuntu/$(basename "$image_path") + sudo ctr images ls + # remove the tarball after loading + sudo rm /home/ubuntu/$(basename "$image_path") +EOF +} + +function deploy_antrea() { + echo "Deploy antrea on cluster..." + helm install antrea "$ANTREA_CHART" --namespace kube-system --set featureGates.SecondaryNetwork=true,featureGates.AntreaIPAM=true + kubectl rollout status --timeout=2m deployment.apps/antrea-controller -n kube-system + kubectl rollout status --timeout=2m daemonset/antrea-agent -n kube-system + kubectl get node -owide + kubectl get pods -A +} + +# Specify the output config file +SSH_CONFIG_FILE=$THIS_DIR/"k8s_nodes_config" +KUBECONFIG_FILE=$THIS_DIR/"remote.kube" +: > "$SSH_CONFIG_FILE" +: > "$KUBECONFIG_FILE" + +# Function to get the node IP and name, then write to a config file. +function generate_ssh_config() { + # Get the nodes' names and their external IPs + scp -o StrictHostKeyChecking=no -i "$AWS_EC2_SSH_KEY_NAME" ubuntu@"$CONTROLPLANE_IP":/home/ubuntu/.kube/config "$KUBECONFIG_FILE" + export KUBECONFIG=$KUBECONFIG_FILE + kubectl get nodes -o wide | tail -n +2 | while read -r line; do + # Extract node name and IP address + NODE_NAME=$(echo "$line" | awk '{print $1}') + NODE_IP=$(echo "$line" | awk '{print $6}') + + # Write the SSH configuration to the file + # shellcheck disable=SC2129 + echo -e "Host $NODE_NAME" >> "$SSH_CONFIG_FILE" + echo -e "\tHostName $NODE_IP" >> "$SSH_CONFIG_FILE" + echo -e "\tPort 22" >> "$SSH_CONFIG_FILE" + echo -e "\tUser ubuntu" >> "$SSH_CONFIG_FILE" + echo -e "\tIdentityFile $THIS_DIR/../$AWS_EC2_SSH_KEY_NAME" >> "$SSH_CONFIG_FILE" + done + + echo "SSH config written to $SSH_CONFIG_FILE" +} + +function create_ippool_and_network_attachment_definition() { + subnet_info=$(aws ec2 describe-subnets --subnet-ids "$AWS_SUBNET_ID" --query 'Subnets[0].{CIDR:CidrBlock}' --output json) + subnet_cidr=$(echo "$subnet_info" | jq -r '.CIDR') + + # Ensure valid CIDR is fetched + if [ "$subnet_cidr" == "null" ] || [ -z "$subnet_cidr" ]; then + echo "Error: Unable to fetch CIDR block for subnet $AWS_SUBNET_ID." + exit 1 + fi + + NETWORK=$(echo "$subnet_cidr" | cut -d/ -f1) + NETMASK=$(echo "$subnet_cidr" | cut -d/ -f2) + echo "Subnet CIDR network: $NETWORK" + echo "Subnet CIDR netmask: $NETMASK" + + ip_pool_cidr="$(echo "$NETWORK" | awk -F'.' '{print $1 "." $2 "." $3 "." 192}')/26" + echo "Creating CIDR reservation $ip_pool_cidr in subnet $AWS_SUBNET_ID..." 
+ aws ec2 create-subnet-cidr-reservation --subnet-id "$AWS_SUBNET_ID" --reservation-type explicit --cidr "$ip_pool_cidr" + + SUBNET_CIDR_RES_ID=$(aws ec2 get-subnet-cidr-reservations --subnet-id "$AWS_SUBNET_ID" --query 'SubnetIpv4CidrReservations[0].SubnetCidrReservationId' --output text) + if [ "$SUBNET_CIDR_RES_ID" == "None" ]; then + echo "Error: Failed to create subnet CIDR reservation." + exit 1 + fi + + # Print the Subnet CIDR Reservation ID for reference + echo "CIDR reservation created with ID: $SUBNET_CIDR_RES_ID" + + gateway=$(echo $NETWORK | awk -F'.' '{print $1 "." $2 "." $3 "." $4+1}') + + # Create IP Pool YAML file + echo "Generating IPPool YAML..." + cat << EOF > "$IP_POOL_YAML" +apiVersion: crd.antrea.io/v1beta1 +kind: IPPool +metadata: + name: pool1 +spec: + ipRanges: + - cidr: $ip_pool_cidr + subnetInfo: + gateway: $gateway + prefixLength: $NETMASK +EOF + echo "Created IP Pool YAML file: $IP_POOL_YAML" + cat $IP_POOL_YAML + kubectl apply -f $IP_POOL_YAML + + # Create NetworkAttachmentDefinition + kubectl apply -f "$NAD_YAML" + echo "Created NetworkAttachmentDefinition" +} + +function run_test() { + kubectl apply -f "$SRIOV_SECONDARY_NETWORKS_YAML" + kubectl apply -f https://raw.githubusercontent.com/k8snetworkplumbingwg/sriov-network-device-plugin/refs/heads/master/deployments/sriovdp-daemonset.yaml + kubectl get nodes -o go-template='{{range .items}}{{.metadata.name}}{{" "}}{{.status.allocatable}}{{"\n"}}{{end}}' + kubectl apply -f https://github.com/k8snetworkplumbingwg/network-attachment-definition-client/raw/master/artifacts/networks-crd.yaml + create_ippool_and_network_attachment_definition + kubectl taint nodes --all node-role.kubernetes.io/control-plane- || true + CONTROLPLANE_NODE=$(kubectl get nodes -l node-role.kubernetes.io/control-plane -o jsonpath='{.items[0].metadata.name}') + WORKER_NODE=$(kubectl get nodes -l '!node-role.kubernetes.io/control-plane' -o jsonpath='{.items[0].metadata.name}') + kubectl label node "$CONTROLPLANE_NODE" eni-id="$CONTROLPLANE_NODE_ENI" + kubectl label node "$WORKER_NODE" eni-id="$WORKER_NODE_ENI" + + go test -v -timeout="$TIMEOUT" antrea.io/antrea/test/e2e-secondary-network -run=TestSRIOVNetwork -provider=remote -remote.sshconfig="$SSH_CONFIG_FILE" -remote.kubeconfig="$KUBECONFIG_FILE" -deploy-antrea=false +} + +function clean_up() { + set +e + INSTANCE_ID=$1 + ENI_ID=$2 + + echo "Terminating EC2 instance: $INSTANCE_ID" + aws ec2 terminate-instances --instance-ids "$INSTANCE_ID" + + # Check if the EC2 instance termination was successful + if [ $? -eq 0 ]; then + echo "Successfully terminated EC2 instance: $INSTANCE_ID" + else + echo "Failed to terminate EC2 instance: $INSTANCE_ID" + return 1 + fi + + # Wait for the instance to reach the 'terminated' state + echo "Waiting for EC2 instance to terminate..." + if ! aws ec2 wait instance-terminated --instance-ids "$INSTANCE_ID"; then + echo "EC2 instance did not terminate successfully: $INSTANCE_ID" + return 1 + fi + echo "EC2 instance terminated successfully: $INSTANCE_ID" + + echo "Detaching network interface: $ENI_ID" + ATTACHMENT_ID=$(aws ec2 describe-network-interfaces --network-interface-ids "$ENI_ID" --query "NetworkInterfaces[0].Attachment.AttachmentId" --output text) + + if [ "$ATTACHMENT_ID" != "None" ]; then + aws ec2 detach-network-interface --attachment-id "$ATTACHMENT_ID" + if [ $? 
-eq 0 ]; then + echo "Successfully detached network interface: $ENI_ID" + else + echo "Failed to detach network interface: $ENI_ID" + return 1 + fi + # Wait for the ENI to become available + echo "Waiting for network interface to become available..." + if ! aws ec2 wait network-interface-available --network-interface-ids "$ENI_ID"; then + echo "Network interface did not become available: $ENI_ID" + return 1 + fi + echo "Network interface is now available: $ENI_ID" + else + echo "Network interface is not attached to any instance." + fi + + echo "Deleting network interface: $ENI_ID" + aws ec2 delete-network-interface --network-interface-id "$ENI_ID" + if [ $? -eq 0 ]; then + echo "Successfully deleted network interface: $ENI_ID" + else + echo "Failed to delete network interface: $ENI_ID" + return 1 + fi + set -e +} + +function delete_subnet_cidr_reservation() { + set +e + echo "Deleting subnet cidr reservation: $SUBNET_CIDR_RES_ID" + aws ec2 delete-subnet-cidr-reservation --subnet-cidr-reservation-id "$SUBNET_CIDR_RES_ID" + if [ $? -eq 0 ]; then + echo "Successfully deleted subnet cidr reservation: $SUBNET_CIDR_RES_ID" + else + echo "Failed to delete subnet cidr reservation: $SUBNET_CIDR_RES_ID" + fi + set -e +} + +function clean_up_all() { + clean_up "$CONTROLPLANE_INSTANCE_ID" "$CONTROLPLANE_NODE_ENI" + clean_up "$WORKER_INSTANCE_ID" "$WORKER_NODE_ENI" + delete_subnet_cidr_reservation +} + +echo "===========Test SR-IOV secondary network in AWS=============" + +if [[ "$RUN_SETUP_ONLY" != true ]]; then + trap clean_up_all EXIT +fi + +if [[ "$RUN_ALL" == true || "$RUN_SETUP_ONLY" == true ]]; then + setup_cluster + build_image + upload_and_load_image "$CONTROLPLANE_IP" "$DOCKER_IMAGE_PATH" + upload_and_load_image "$WORKER_IP" "$DOCKER_IMAGE_PATH" + generate_ssh_config + deploy_antrea + run_test +fi + +exit 0 diff --git a/go.mod b/go.mod index 4ef3eecae6a..7a034ac7c47 100644 --- a/go.mod +++ b/go.mod @@ -11,10 +11,11 @@ require ( github.com/Microsoft/go-winio v0.6.2 github.com/Microsoft/hcsshim v0.11.4 github.com/TomCodeLV/OVSDB-golang-lib v0.0.0-20200116135253-9bbdfadcd881 - github.com/aws/aws-sdk-go-v2 v1.16.10 - github.com/aws/aws-sdk-go-v2/config v1.16.0 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.23 - github.com/aws/aws-sdk-go-v2/service/s3 v1.27.4 + github.com/aws/aws-sdk-go-v2 v1.36.1 + github.com/aws/aws-sdk-go-v2/config v1.29.6 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.61 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.203.0 + github.com/aws/aws-sdk-go-v2/service/s3 v1.76.1 github.com/blang/semver v3.5.1+incompatible github.com/cheggaaa/pb/v3 v3.1.6 github.com/containernetworking/cni v1.2.0 @@ -98,20 +99,21 @@ require ( github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.4 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.12.12 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.11 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.17 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.11 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.18 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.8 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.12 // indirect - 
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.11 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.11 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.11.15 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.16.12 // indirect - github.com/aws/smithy-go v1.12.1 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.59 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 // indirect + github.com/aws/smithy-go v1.22.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect @@ -163,7 +165,6 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/josharian/native v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect diff --git a/go.sum b/go.sum index 2c35ff741df..06b48df2c9d 100644 --- a/go.sum +++ b/go.sum @@ -70,43 +70,46 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go-v2 v1.16.10 h1:+yDD0tcuHRQZgqONkpDwzepqmElQaSlFPymHRHR9mrc= -github.com/aws/aws-sdk-go-v2 v1.16.10/go.mod h1:WTACcleLz6VZTp7fak4EO5b9Q4foxbn+8PIz3PmyKlo= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.4 h1:zfT11pa7ifu/VlLDpmc5OY2W4nYmnKkFDGeMVnmqAI0= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.4/go.mod h1:ES0I1GBs+YYgcDS1ek47Erbn4TOL811JKqBXtgzqyZ8= -github.com/aws/aws-sdk-go-v2/config v1.15.17/go.mod h1:eatrtwIm5WdvASoYCy5oPkinfiwiYFg2jLG9tJoKzkE= -github.com/aws/aws-sdk-go-v2/config v1.16.0 h1:LxHC50cwOLxYo67NEpwpNUiOi6ngXfDpEETphSZ6bAw= -github.com/aws/aws-sdk-go-v2/config v1.16.0/go.mod h1:eatrtwIm5WdvASoYCy5oPkinfiwiYFg2jLG9tJoKzkE= -github.com/aws/aws-sdk-go-v2/credentials v1.12.12 h1:iShu6VaWZZZfUZvlGtRjl+g1lWk44g1QmiCTD4KS0jI= -github.com/aws/aws-sdk-go-v2/credentials v1.12.12/go.mod h1:vFHC2HifIWHebmoVsfpqliKuqbAY2LaVlvy03JzF4c4= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.11 h1:zZHPdM2x09/0F8D7XyVvQnP2/jaW7bEMmtcSCPYq/iI= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.11/go.mod 
h1:38Asv/UyQbDNpSXCurZRlDMjzIl6J+wUe8vY3TtUuzA= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.23 h1:lzS1GSHBzvBMlCA030/ecL5tF2ip8RLr/LBq5fBpv/4= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.23/go.mod h1:yGuKwoNVv2eGUHlp7ciCQLHmFNeESebnHucZfRL9EkA= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.17 h1:U8DZvyFFesBmK62dYC6BRXm4Cd/wPP3aPcecu3xv/F4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.17/go.mod h1:6qtGip7sJEyvgsLjphRZWF9qPe3xJf1mL/MM01E35Wc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.11 h1:GMp98usVW5tzQhxd26KWhoNQPlR2noIlfbzqjVGBhLU= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.11/go.mod h1:cYAfnB+9ZkmZWpQWmPDsuIGm4EA+6k2ZVtxKjw/XJBY= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.18 h1:/spg6h3tG4pefphbvhpgdMtFMegSajPPSEJd1t8lnpc= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.18/go.mod h1:hTHq8hL4bAxJyng364s9d4IUGXZOs7Y5LSqAhIiIQ2A= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.8 h1:9PY5a+kHQzC6d9eR+KLNSJP3DHDLYmPFA5/+eSDBo9o= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.8/go.mod h1:pcQfUOFVK4lMnSzgX3dCA81UsA9YCilRUSYgkjSU2i8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.4 h1:akfcyqM9SvrBKWZOkBcXAGDrHfKaEP4Aca8H/bCiLW8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.4/go.mod h1:oehQLbMQkppKLXvpx/1Eo0X47Fe+0971DXC9UjGnKcI= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.12 h1:eNQYkKjDSLDjIbBQ85rIkjpBGgnavrl/U3YKDdxAz14= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.12/go.mod h1:k2HaF2yfT082M+kKo3Xdf4rd5HGKvDmrPC5Kwzc2KUw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.11 h1:GkYtp4gi4wdWUV+pPetjk5y2aDxbr0t8n5OjVBwZdII= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.11/go.mod h1:OEofCUKF7Hri4ShOCokF6k6hGq9PCB2sywt/9rLSXjY= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.11 h1:ZBLEKweAzBBtJa8H+MTFfVyvo+eHdM8xec5oTm9IlqI= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.11/go.mod h1:mNS1VHxYXPNqxIdCTxf87j9ROfTMa4fNpIkA+iAfz0g= -github.com/aws/aws-sdk-go-v2/service/s3 v1.27.4 h1:0RPAahwT63znFepvhfS+/WYtT+gEuAwaeNcCrzTQMH0= -github.com/aws/aws-sdk-go-v2/service/s3 v1.27.4/go.mod h1:wcpDmROpK5W7oWI6JcJIYGrVpHbF/Pu+FHxyBXyoa1E= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.15 h1:HaIE5/TtKr66qZTJpvMifDxH4lRt2JZawbkLYOo1F+Y= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.15/go.mod h1:dDVD4ElJRTQXx7dOQ59EkqGyNU9tnwy1RKln+oLIOTU= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.12 h1:YU9UHPukkCCnETHEExOptF/BxPvGJKXO/NBx+RMQ/2A= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.12/go.mod h1:b53qpmhHk7mTL2J/tfG6f38neZiyBQSiNXGCuNKq4+4= -github.com/aws/smithy-go v1.12.1 h1:yQRC55aXN/y1W10HgwHle01DRuV9Dpf31iGkotjt3Ag= -github.com/aws/smithy-go v1.12.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/aws-sdk-go-v2 v1.36.1 h1:iTDl5U6oAhkNPba0e1t1hrwAo02ZMqbrGq4k5JBWM5E= +github.com/aws/aws-sdk-go-v2 v1.36.1/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg= +github.com/aws/aws-sdk-go-v2/config v1.29.6 h1:fqgqEKK5HaZVWLQoLiC9Q+xDlSp+1LYidp6ybGE2OGg= +github.com/aws/aws-sdk-go-v2/config v1.29.6/go.mod h1:Ft+WLODzDQmCTHDvqAH1JfC2xxbZ0MxpZAcJqmE1LTQ= +github.com/aws/aws-sdk-go-v2/credentials v1.17.59 h1:9btwmrt//Q6JcSdgJOLI98sdr5p7tssS9yAsGe8aKP4= 
+github.com/aws/aws-sdk-go-v2/credentials v1.17.59/go.mod h1:NM8fM6ovI3zak23UISdWidyZuI1ghNe2xjzUZAyT+08= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28 h1:KwsodFKVQTlI5EyhRSugALzsV6mG/SGrdjlMXSZSdso= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.28/go.mod h1:EY3APf9MzygVhKuPXAc5H+MkGb8k/DOSQjWS0LgkKqI= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.61 h1:BBIPjlEWLxX1huGTkBu/eeqyaXC0pVwDCYbQuE/JPfU= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.61/go.mod h1:6dkLZQM1D/wKKFJEvyB1OCXJ0f68wcIPDOiXm0KyT8A= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32 h1:BjUcr3X3K0wZPGFg2bxOWW3VPN8rkE3/61zhP+IHviA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.32/go.mod h1:80+OGC/bgzzFFTUmcuwD0lb4YutwQeKLFpmt6hoWapU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32 h1:m1GeXHVMJsRsUAqG6HjZWx9dj7F5TR+cF1bjyfYyBd4= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.32/go.mod h1:IitoQxGfaKdVLNg0hD8/DXmAqNy0H4K2H2Sf91ti8sI= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32 h1:OIHj/nAhVzIXGzbAE+4XmZ8FPvro3THr6NlqErJc3wY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.32/go.mod h1:LiBEsDo34OJXqdDlRGsilhlIiXR7DL+6Cx2f4p1EgzI= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.203.0 h1:EDLBXOs5D0KUqDThg8ID63mK5E7lJ8pjHGBtix6O9j0= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.203.0/go.mod h1:nSbxgPGhyI9j/cMVSHUEEtNQzEYeNOkbHnHNeTuQqt0= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0 h1:kT2WeWcFySdYpPgyqJMSUE7781Qucjtn6wBvrgm9P+M= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.6.0/go.mod h1:WYH1ABybY7JK9TITPnk6ZlP7gQB8psI4c9qDmMsnLSA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13 h1:SYVGSFQHlchIcy6e7x12bsrxClCXSP5et8cqVhL8cuw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.13/go.mod h1:kizuDaLX37bG5WZaoxGPQR/LNFXpxp0vsUnqfkWXfNE= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13 h1:OBsrtam3rk8NfBEq7OLOMm5HtQ9Yyw32X4UQMya/wjw= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.13/go.mod h1:3U4gFA5pmoCOja7aq4nSaIAGbaOHv2Yl2ug018cmC+Q= +github.com/aws/aws-sdk-go-v2/service/s3 v1.76.1 h1:d4ZG8mELlLeUWFBMCqPtRfEP3J6aQgg/KTC9jLSlkMs= +github.com/aws/aws-sdk-go-v2/service/s3 v1.76.1/go.mod h1:uZoEIR6PzGOZEjgAZE4hfYfsqK2zOHhq68JLKEvvXj4= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.15 h1:/eE3DogBjYlvlbhd2ssWyeuovWunHLxfgw3s/OJa4GQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.15/go.mod h1:2PCJYpi7EKeA5SkStAmZlF6fi0uUABuhtF8ILHjGc3Y= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14 h1:M/zwXiL2iXUrHputuXgmO94TVNmcenPHxgLXLutodKE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.14/go.mod h1:RVwIw3y/IqxC2YEXSIkAzRDdEU1iRabDPaYjpGCbCGQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.14 h1:TzeR06UCMUq+KA3bDkujxK1GVGy+G8qQN/QVYzGLkQE= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.14/go.mod h1:dspXf/oYWGWo6DEvj98wpaTeqt5+DMidZD0A9BYTizc= +github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= 
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -360,7 +363,6 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -440,10 +442,6 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= diff --git a/pkg/flowaggregator/s3uploader/s3uploader_test.go b/pkg/flowaggregator/s3uploader/s3uploader_test.go index 999a2373f9e..df4dd887d4f 100644 --- a/pkg/flowaggregator/s3uploader/s3uploader_test.go +++ b/pkg/flowaggregator/s3uploader/s3uploader_test.go @@ -193,7 +193,7 @@ func TestBatchUploadAllError(t *testing.T) { assert.Equal(t, 0, len(s3UploadProc.bufferQueue)) assert.Equal(t, "", s3UploadProc.currentBuffer.String()) assert.EqualValues(t, 0, s3UploadProc.cachedRecordCount) - expectedErrMsg := "error when uploading file to S3: operation error S3: PutObject, https response error StatusCode: 301" + expectedErrMsg := "error when uploading file to S3: operation error S3: PutObject" assert.Contains(t, err.Error(), expectedErrMsg) } diff --git a/test/e2e-secondary-network/aws/ec2.go b/test/e2e-secondary-network/aws/ec2.go new file mode 100644 index 00000000000..e969a5830cf --- /dev/null +++ b/test/e2e-secondary-network/aws/ec2.go @@ -0,0 +1,43 @@ +// Copyright 2025 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aws
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/ec2"
+)
+
+func AssignIPToEC2ENI(ctx context.Context, interfaceID, ip string) error {
+	cfg, err := config.LoadDefaultConfig(ctx, config.WithEC2IMDSRegion())
+	if err != nil {
+		return fmt.Errorf("unable to load AWS configuration: %w", err)
+	}
+	client := ec2.NewFromConfig(cfg)
+
+	ipInput := &ec2.AssignPrivateIpAddressesInput{
+		AllowReassignment:   aws.Bool(true),
+		NetworkInterfaceId:  aws.String(interfaceID),
+		PrivateIpAddresses:  []string{ip},
+	}
+	_, err = client.AssignPrivateIpAddresses(ctx, ipInput)
+	if err != nil {
+		return fmt.Errorf("unable to assign IP %s to interface %s: %w", ip, interfaceID, err)
+	}
+	return nil
+}
diff --git a/test/e2e-secondary-network/infra/sriov-network-attachment-definition.yml b/test/e2e-secondary-network/infra/sriov-network-attachment-definition.yml
new file mode 100644
index 00000000000..0accbde2536
--- /dev/null
+++ b/test/e2e-secondary-network/infra/sriov-network-attachment-definition.yml
@@ -0,0 +1,16 @@
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+  name: sriov-net1
+  annotations:
+    k8s.v1.cni.cncf.io/resourceName: intel.com/intel_sriov_netdevice
+spec:
+  config: '{
+    "cniVersion": "0.3.0",
+    "type": "antrea",
+    "networkType": "sriov",
+    "ipam": {
+      "type": "antrea",
+      "ippools": ["pool1"]
+    }
+  }'
diff --git a/test/e2e-secondary-network/infra/sriov-secondary-networks.yml b/test/e2e-secondary-network/infra/sriov-secondary-networks.yml
new file mode 100644
index 00000000000..259b9f4f0f0
--- /dev/null
+++ b/test/e2e-secondary-network/infra/sriov-secondary-networks.yml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: sriovdp-config
+  namespace: kube-system
+data:
+  config.json: |
+    {
+      "resourceList": [{
+        "resourceName": "intel_sriov_netdevice",
+        "selectors": {
+          "pciAddresses": ["0000:00:04.0"]
+        }
+      }
+      ]
+    }
diff --git a/test/e2e-secondary-network/secondary_network_test.go b/test/e2e-secondary-network/secondary_network_test.go
index 262069f137b..9da3b61449e 100644
--- a/test/e2e-secondary-network/secondary_network_test.go
+++ b/test/e2e-secondary-network/secondary_network_test.go
@@ -15,6 +15,7 @@
 package e2esecondary
 
 import (
+	"context"
 	"fmt"
 	"net"
 	"strings"
@@ -24,8 +25,11 @@ import (
 	logs "github.com/sirupsen/logrus"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
 
 	antreae2e "antrea.io/antrea/test/e2e"
+	"antrea.io/antrea/test/e2e-secondary-network/aws"
 )
 
 type testPodInfo struct {
@@ -55,7 +59,7 @@ const (
 	pingSize       = 40
 	defaultTimeout = 10 * time.Second
 	sriovReqName   = "intel.com/intel_sriov_netdevice"
-	sriovResNum    = 3
+	sriovResNum    = 1
 )
 
 // formAnnotationStringOfPod forms the annotation string, used in the generation of each Pod YAML file.
@@ -228,39 +232,14 @@ func testSecondaryNetwork(t *testing.T, networkType string, pods []*testPodInfo) testData := &testData{e2eTestData: e2eTestData, networkType: networkType, pods: pods} - t.Run("testCreateTestPodOnNode", func(t *testing.T) { - testData.createPods(t, e2eTestData.GetTestNamespace()) - }) - t.Run("testpingBetweenInterfaces", func(t *testing.T) { - err := testData.pingBetweenInterfaces(t) - if err != nil { - t.Fatalf("Error when pinging between interfaces: %v", err) - } - }) -} - -func TestSriovNetwork(t *testing.T) { - // Create Pods on the control plane Node, assuming a single Node cluster for the SR-IOV - // test. - nodeName := antreae2e.NodeName(0) - pods := []*testPodInfo{ - { - podName: "sriov-pod1", - nodeName: nodeName, - interfaceNetworks: map[string]string{"eth1": "sriov-net1", "eth2": "sriov-net2"}, - }, - { - podName: "sriov-pod2", - nodeName: nodeName, - interfaceNetworks: map[string]string{"eth2": "sriov-net1", "eth3": "sriov-net3"}, - }, - { - podName: "sriov-pod3", - nodeName: nodeName, - interfaceNetworks: map[string]string{"eth4": "sriov-net1"}, - }, + err = testData.createPods(t, e2eTestData.GetTestNamespace()) + if err != nil { + t.Fatalf("Error when create test Pods: %v", err) + } + err = testData.pingBetweenInterfaces(t) + if err != nil { + t.Fatalf("Error when pinging between interfaces: %v", err) } - testSecondaryNetwork(t, networkTypeSriov, pods) } func TestVLANNetwork(t *testing.T) { @@ -288,3 +267,87 @@ func TestVLANNetwork(t *testing.T) { } testSecondaryNetwork(t, networkTypeVLAN, pods) } + +func (data *testData) assignIP(clientset *kubernetes.Clientset) error { + e2eTestData := data.e2eTestData + namespace := e2eTestData.GetTestNamespace() + + for _, testPod := range data.pods { + node, err := clientset.CoreV1().Nodes().Get(context.TODO(), testPod.nodeName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("error when getting the cluster Node %s: %v", testPod.nodeName, err) + } + eni, has := node.Labels["eni-id"] + if !has { + return fmt.Errorf("the label `eni-id` not found in the cluster Node: %s", testPod.nodeName) + } + var podIP net.IP + _, err = e2eTestData.PodWaitFor(defaultTimeout, testPod.podName, namespace, func(pod *corev1.Pod) (bool, error) { + if pod.Status.Phase != corev1.PodRunning { + return false, nil + } + podIPs, err := data.listPodIPs(testPod) + if err != nil { + return false, err + } + ip, exists := podIPs["eth1"] + if !exists || ip == nil { + logs.Infof("IP not available for interface 'eth1' in Pod %s, retrying...", testPod.podName) + return false, nil + } + podIP = ip + + return true, nil + }) + if err := aws.AssignIPToEC2ENI(context.TODO(), eni, podIP.String()); err != nil { + return err + } + logs.Infof("assigned private IP address %s to interface %s", podIP, eni) + if err != nil { + return fmt.Errorf("error when waiting for the secondary IP for Pod %+v: %w", testPod, err) + } + } + return nil +} + +func TestSRIOVNetwork(t *testing.T) { + e2eTestData, err := antreae2e.SetupTest(t) + if err != nil { + t.Fatalf("Error when setting up test: %v", err) + } + defer antreae2e.TeardownTest(t, e2eTestData) + + node0Name := antreae2e.NodeName(0) + node1Name := antreae2e.NodeName(1) + pods := []*testPodInfo{ + { + podName: "sriov-pod1", + nodeName: node0Name, + interfaceNetworks: map[string]string{"eth1": "sriov-net1"}, + }, + { + podName: "sriov-pod2", + nodeName: node1Name, + interfaceNetworks: map[string]string{"eth1": "sriov-net1"}, + }, + } + + testData := &testData{e2eTestData: e2eTestData, networkType: 
networkTypeSriov, pods: pods} + + err = testData.createPods(t, e2eTestData.GetTestNamespace()) + if err != nil { + t.Fatalf("Error when create test Pods: %v", err) + } + clientset, err := kubernetes.NewForConfig(e2eTestData.KubeConfig) + if err != nil { + t.Fatalf("error when creating kubernetes client: %v", err) + } + err = testData.assignIP(clientset) + if err != nil { + t.Fatalf("Error when assign IP to ec2 instance: %v", err) + } + err = testData.pingBetweenInterfaces(t) + if err != nil { + t.Fatalf("Error when pinging between interfaces: %v", err) + } +} diff --git a/test/e2e/framework.go b/test/e2e/framework.go index 8064723c2f2..a5ded7ffd64 100644 --- a/test/e2e/framework.go +++ b/test/e2e/framework.go @@ -252,7 +252,7 @@ type PodInfo struct { type TestData struct { ClusterName string provider providers.ProviderInterface - kubeConfig *restclient.Config + KubeConfig *restclient.Config clientset kubernetes.Interface aggregatorClient aggregatorclientset.Interface crdClient crdclientset.Interface @@ -1317,7 +1317,7 @@ func (data *TestData) CreateClient(kubeconfigPath string) error { if err != nil { return fmt.Errorf("error when creating CRD client: %v", err) } - data.kubeConfig = kubeConfig + data.KubeConfig = kubeConfig data.clientset = clientset data.aggregatorClient = aggregatorClient data.crdClient = crdClient @@ -2290,7 +2290,7 @@ func (data *TestData) RunCommandFromPod(podNamespace string, podName string, con Stderr: true, TTY: false, }, scheme.ParameterCodec) - exec, err := remotecommand.NewSPDYExecutor(data.kubeConfig, "POST", request.URL()) + exec, err := remotecommand.NewSPDYExecutor(data.KubeConfig, "POST", request.URL()) if err != nil { return "", "", err } @@ -2716,7 +2716,7 @@ func (data *TestData) mutateAntreaConfigMap( } configMap.Data["antrea-controller.conf"] = string(b) } - //getAgentConf should be able to process both windows and linux configmap. + // getAgentConf should be able to process both windows and linux configmap. getAgentConf := func(cm *corev1.ConfigMap) (*agentconfig.AgentConfig, error) { var agentConf agentconfig.AgentConfig if err := yaml.Unmarshal([]byte(cm.Data["antrea-agent.conf"]), &agentConf); err != nil { diff --git a/test/e2e/supportbundle_test.go b/test/e2e/supportbundle_test.go index d865e2bd41e..bf8fb9009d4 100644 --- a/test/e2e/supportbundle_test.go +++ b/test/e2e/supportbundle_test.go @@ -90,7 +90,7 @@ func testSupportBundle(name string, t *testing.T) { func getAndCheckSupportBundle(t *testing.T, name, podIP string, podPort int, token string, podName string, data *TestData) { // Setup clients. 
- localConfig := rest.CopyConfig(data.kubeConfig) + localConfig := rest.CopyConfig(data.KubeConfig) pf, err := portforwarder.NewPortForwarder(localConfig, metav1.NamespaceSystem, podName, podPort, "localhost", 8080) require.NoError(t, err) pf.Start() From 15ca6e8fadd440876341a058653a33ed833b9527 Mon Sep 17 00:00:00 2001 From: Wenqi Qiu Date: Fri, 21 Feb 2025 14:00:01 +0800 Subject: [PATCH 2/4] Update Signed-off-by: Wenqi Qiu --- ci/test-sriov-secondary-network-aws.sh | 5 ++--- .../secondary_network_test.go | 18 ++++++++---------- 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/ci/test-sriov-secondary-network-aws.sh b/ci/test-sriov-secondary-network-aws.sh index 2699ef6531c..e509b6cfeff 100755 --- a/ci/test-sriov-secondary-network-aws.sh +++ b/ci/test-sriov-secondary-network-aws.sh @@ -142,7 +142,7 @@ ANTREA_TAR="antrea-ubuntu.tar" DOCKER_IMAGE_PATH="$THIS_DIR/../$ANTREA_TAR" SRIOV_SECONDARY_NETWORKS_YAML="$THIS_DIR/../test/e2e-secondary-network/infra/sriov-secondary-networks.yml" IP_POOL_YAML="pool1.yaml" -NAD_YAML="$THIS_DIR/../test/e2e-secondary-network/infra/sriov-network-attachment-definition.yml" +NETATTACH_YAML="$THIS_DIR/../test/e2e-secondary-network/infra/sriov-network-attachment-definition.yml" CONTROLPLANE_IP="" WORKER_IP="" @@ -469,7 +469,7 @@ EOF kubectl apply -f $IP_POOL_YAML # Create NetworkAttachmentDefinition - kubectl apply -f "$NAD_YAML" + kubectl apply -f "$NETATTACH_YAML" echo "Created NetworkAttachmentDefinition" } @@ -479,7 +479,6 @@ function run_test() { kubectl get nodes -o go-template='{{range .items}}{{.metadata.name}}{{" "}}{{.status.allocatable}}{{"\n"}}{{end}}' kubectl apply -f https://github.com/k8snetworkplumbingwg/network-attachment-definition-client/raw/master/artifacts/networks-crd.yaml create_ippool_and_network_attachment_definition - kubectl taint nodes --all node-role.kubernetes.io/control-plane- || true CONTROLPLANE_NODE=$(kubectl get nodes -l node-role.kubernetes.io/control-plane -o jsonpath='{.items[0].metadata.name}') WORKER_NODE=$(kubectl get nodes -l '!node-role.kubernetes.io/control-plane' -o jsonpath='{.items[0].metadata.name}') kubectl label node "$CONTROLPLANE_NODE" eni-id="$CONTROLPLANE_NODE_ENI" diff --git a/test/e2e-secondary-network/secondary_network_test.go b/test/e2e-secondary-network/secondary_network_test.go index 9da3b61449e..b79d967e29a 100644 --- a/test/e2e-secondary-network/secondary_network_test.go +++ b/test/e2e-secondary-network/secondary_network_test.go @@ -84,7 +84,7 @@ func (data *testData) createPods(t *testing.T, ns string) error { for _, pod := range data.pods { err := data.createPodForSecondaryNetwork(ns, pod) if err != nil { - return fmt.Errorf("error in creating pods.., err: %v", err) + return fmt.Errorf("error in creating pods.., err: %w", err) } } return err @@ -192,7 +192,7 @@ func (data *testData) pingBetweenInterfaces(t *testing.T) error { return true, nil }) if err != nil { - return fmt.Errorf("error when waiting for secondary IPs for Pod %+v: %v", testPod, err) + return fmt.Errorf("error when waiting for secondary IPs for Pod %+v: %w", testPod, err) } } @@ -212,7 +212,7 @@ func (data *testData) pingBetweenInterfaces(t *testing.T) error { } if err := e2eTestData.RunPingCommandFromTestPod(antreae2e.PodInfo{Name: sourcePod.podName, OS: osType, NodeName: sourcePod.nodeName, Namespace: namespace}, namespace, &IPToPing, containerName, pingCount, pingSize, false); err != nil { - return fmt.Errorf("ping '%s' -> '%s'(Interface: %s, IP Address: %s) failed: %v", sourcePod.podName, targetPod.podName, 
targetAttachment.iface, targetAttachment.ip, err) + return fmt.Errorf("ping '%s' -> '%s'(Interface: %s, IP Address: %s) failed: %w", sourcePod.podName, targetPod.podName, targetAttachment.iface, targetAttachment.ip, err) } logs.Infof("ping '%s' -> '%s'( Interface: %s, IP Address: %s): OK", sourcePod.podName, targetPod.podName, targetAttachment.iface, targetAttachment.ip) } @@ -275,10 +275,10 @@ func (data *testData) assignIP(clientset *kubernetes.Clientset) error { for _, testPod := range data.pods { node, err := clientset.CoreV1().Nodes().Get(context.TODO(), testPod.nodeName, metav1.GetOptions{}) if err != nil { - return fmt.Errorf("error when getting the cluster Node %s: %v", testPod.nodeName, err) + return fmt.Errorf("error when getting the cluster Node %s: %w", testPod.nodeName, err) } - eni, has := node.Labels["eni-id"] - if !has { + eni, exists := node.Labels["eni-id"] + if !exists { return fmt.Errorf("the label `eni-id` not found in the cluster Node: %s", testPod.nodeName) } var podIP net.IP @@ -317,17 +317,15 @@ func TestSRIOVNetwork(t *testing.T) { } defer antreae2e.TeardownTest(t, e2eTestData) - node0Name := antreae2e.NodeName(0) - node1Name := antreae2e.NodeName(1) pods := []*testPodInfo{ { podName: "sriov-pod1", - nodeName: node0Name, + nodeName: antreae2e.NodeName(0), interfaceNetworks: map[string]string{"eth1": "sriov-net1"}, }, { podName: "sriov-pod2", - nodeName: node1Name, + nodeName: antreae2e.NodeName(1), interfaceNetworks: map[string]string{"eth1": "sriov-net1"}, }, } From dec73b2bd1866af0bcfbaa4f0ac74c37b11e7bf6 Mon Sep 17 00:00:00 2001 From: Wenqi Qiu Date: Fri, 21 Feb 2025 15:34:46 +0800 Subject: [PATCH 3/4] update Signed-off-by: Wenqi Qiu --- ci/test-sriov-secondary-network-aws.sh | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/ci/test-sriov-secondary-network-aws.sh b/ci/test-sriov-secondary-network-aws.sh index e509b6cfeff..7059743ecb5 100755 --- a/ci/test-sriov-secondary-network-aws.sh +++ b/ci/test-sriov-secondary-network-aws.sh @@ -234,12 +234,13 @@ function install_kubernetes() { done echo "Installing Kubernetes on node $node_ip..." ssh -o StrictHostKeyChecking=no -i "$AWS_EC2_SSH_KEY_NAME" ubuntu@"$node_ip" << EOF + set -x sudo apt update && sudo apt upgrade -y sudo apt install -y docker.io sudo docker --version sudo apt-get update - sudo apt-get install apt-transport-https ca-certificates curl gpg + sudo apt-get install apt-transport-https ca-certificates curl gpg -y sudo install -m 0755 -d /etc/apt/keyrings sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc @@ -262,6 +263,7 @@ function install_kubernetes() { sudo apt-mark hold kubelet kubeadm kubectl sudo swapoff -a sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab + sudo kubeadm version EOF } @@ -335,18 +337,8 @@ function setup_cluster { install_kubernetes "$CONTROLPLANE_IP" install_kubernetes "$WORKER_IP" - # Initialize Kubernetes control-plane Node, if it fails, it will not exit - initialize_control_plane_node "$CONTROLPLANE_IP" || { - echo "Failed to initialize control-plane Node. Re-running installation for both nodes." - # Re-run the install commands for both control-plane and worker nodes - install_kubernetes "$CONTROLPLANE_IP" - install_kubernetes "$WORKER_IP" - # After re-installing, attempt the initialization again - initialize_control_plane_node "$CONTROLPLANE_IP" || { - echo "Initialization ControlPlane failed again, please check logs." 
-            exit 1
-        }
-    }
+    # Initialize Kubernetes on control-plane Node
+    initialize_control_plane_node "$CONTROLPLANE_IP"
 
     # Get the join command and join the worker node to the cluster
     JOIN_COMMAND=$(get_join_command "$CONTROLPLANE_IP")
     join_worker "$WORKER_IP" "$JOIN_COMMAND"
@@ -479,6 +471,7 @@ function run_test() {
     kubectl get nodes -o go-template='{{range .items}}{{.metadata.name}}{{" "}}{{.status.allocatable}}{{"\n"}}{{end}}'
     kubectl apply -f https://github.com/k8snetworkplumbingwg/network-attachment-definition-client/raw/master/artifacts/networks-crd.yaml
     create_ippool_and_network_attachment_definition
+    kubectl taint nodes --all node-role.kubernetes.io/control-plane- || true
     CONTROLPLANE_NODE=$(kubectl get nodes -l node-role.kubernetes.io/control-plane -o jsonpath='{.items[0].metadata.name}')
     WORKER_NODE=$(kubectl get nodes -l '!node-role.kubernetes.io/control-plane' -o jsonpath='{.items[0].metadata.name}')
     kubectl label node "$CONTROLPLANE_NODE" eni-id="$CONTROLPLANE_NODE_ENI"

From 4b086f3ca066d5f3bc0a4d1960e6c012c096af7b Mon Sep 17 00:00:00 2001
From: Wenqi Qiu
Date: Mon, 24 Feb 2025 23:14:35 +0800
Subject: [PATCH 4/4] update

Signed-off-by: Wenqi Qiu
---
 ci/test-sriov-secondary-network-aws.sh | 2 --
 1 file changed, 2 deletions(-)

diff --git a/ci/test-sriov-secondary-network-aws.sh b/ci/test-sriov-secondary-network-aws.sh
index 7059743ecb5..d1876973593 100755
--- a/ci/test-sriov-secondary-network-aws.sh
+++ b/ci/test-sriov-secondary-network-aws.sh
@@ -351,8 +351,6 @@ function setup_cluster {
 }
 
 function build_image() {
-    chmod -R g-w build/images/ovs
-    chmod -R g-w build/images/base
     ./hack/build-antrea-linux-all.sh --pull
     docker save antrea/antrea-agent-ubuntu:latest antrea/antrea-controller-ubuntu:latest -o $ANTREA_TAR
 }
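
For reference, a minimal sketch of how the AssignIPToEC2ENI helper added by this series (test/e2e-secondary-network/aws/ec2.go) can be driven outside the e2e framework. The standalone main package, the ENI ID, and the IP address below are illustrative placeholders, not part of this patch; credentials come from the default AWS config chain, as in the helper itself.

package main

import (
	"context"
	"log"

	// Helper introduced in PATCH 1/4 of this series.
	"antrea.io/antrea/test/e2e-secondary-network/aws"
)

func main() {
	// Both values are hypothetical; replace them with a real ENI from the test
	// subnet and a free secondary IP inside the reserved CIDR.
	ctx := context.Background()
	if err := aws.AssignIPToEC2ENI(ctx, "eni-0123456789abcdef0", "10.0.0.200"); err != nil {
		log.Fatalf("assigning secondary IP failed: %v", err)
	}
	log.Println("secondary IP assigned to ENI")
}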