Skip to content

Commit

Permalink
KUBE-263: Add eks access entries example (#286)
Browse files Browse the repository at this point in the history
Co-authored-by: gleb <[email protected]>
  • Loading branch information
stgleb and gleb authored Mar 28, 2024
1 parent 82863e5 commit e3a6448
Show file tree
Hide file tree
Showing 9 changed files with 306 additions and 0 deletions.
2 changes: 2 additions & 0 deletions castai/sdk/api.gen.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

32 changes: 32 additions & 0 deletions examples/eks/eks_cluster_access_entries/README.MD
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
## EKS and CAST AI example with Access Entries as the authentication method (Phase 2)

This example shows how to onboard an EKS cluster to CAST AI with CAST AI managed IAM policies.

IAM policies in the example are created by [castai/eks-iam-role/castai module](https://github.com/castai/terraform-castai-eks-role-iam).

Example configuration should be analysed in the following order:
1. Create VPC - `vpc.tf`
2. Create EKS cluster - `eks.tf`
3. Create IAM and other CAST AI related resources to connect the EKS cluster to CAST AI (in Phase 2) - `castai.tf`

# Usage
1. Rename `tf.vars.example` to `tf.vars`
2. Update `tf.vars` file with your cluster name, cluster region and CAST AI API token
3. Initialize Terraform. Under example root folder run:
```
terraform init
```
4. Run Terraform apply:
```
terraform apply -var-file=tf.vars
```
5. To destroy resources created by this example:
```
terraform destroy -var-file=tf.vars
```

> **Note**
>
> Cluster access mode should be either `EKS API and ConfigMap` or `EKS API` in case of existing cluster.
If you run into any issues, please refer to the [Terraform troubleshooting guide](https://docs.cast.ai/docs/terraform-troubleshooting).
85 changes: 85 additions & 0 deletions examples/eks/eks_cluster_access_entries/castai.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
# 3. Connect EKS cluster to CAST AI.

# Local values shared across this example.
locals {
# NOTE(review): `role_name` is not referenced anywhere in the files of this
# example — confirm it is consumed elsewhere or remove it.
role_name = "castai-eks-role"
}

# Configure Data sources and providers required for CAST AI connection.
# Resolves the AWS account ID of the current credentials; consumed by the
# IAM module and the CAST AI cluster-id resource below.
data "aws_caller_identity" "current" {}

# Obtains the CAST AI user ARN for this cluster; passed to the IAM module so
# the cross-account trust policy can reference it.
resource "castai_eks_user_arn" "castai_user_arn" {
cluster_id = castai_eks_clusterid.cluster_id.id
}

# CAST AI provider configuration. `api_url` defaults to the public API and is
# overridable for development/testing (see variables.tf).
provider "castai" {
api_url = var.castai_api_url
api_token = var.castai_api_token
}

# Helm provider authenticated against the EKS cluster via a short-lived token
# from `aws eks get-token`; used by the CAST AI module to install agent charts.
provider "helm" {
kubernetes {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
exec {
api_version = "client.authentication.k8s.io/v1beta1"
command = "aws"
# This requires the awscli to be installed locally where Terraform is executed.
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name, "--region", var.cluster_region]
}
}
}

# Create the AWS IAM policies and cross-account role that CAST AI assumes to
# manage this cluster (the module's `role_arn` output is consumed below).
module "castai-eks-role-iam" {
source = "castai/eks-role-iam/castai"

aws_account_id = data.aws_caller_identity.current.account_id
aws_cluster_region = var.cluster_region
aws_cluster_name = var.cluster_name
aws_cluster_vpc_id = module.vpc.vpc_id

# Trust the CAST AI user resolved above so CAST AI can assume the role.
castai_user_arn = castai_eks_user_arn.castai_user_arn.arn

# Scope the IAM resources to this cluster instead of account-wide.
create_iam_resources_per_cluster = true
}

# Derives the CAST AI cluster ID from the AWS account, region and cluster name;
# referenced by the castai_eks_user_arn resource above.
resource "castai_eks_clusterid" "cluster_id" {
account_id = data.aws_caller_identity.current.account_id
region = var.cluster_region
cluster_name = var.cluster_name
}

# Connect the EKS cluster to CAST AI using the CAST AI eks-cluster module
# (installs the CAST AI components and registers node configurations).
module "castai-eks-cluster" {
source = "castai/eks-cluster/castai"
api_url = var.castai_api_url
castai_api_token = var.castai_api_token
grpc_url = var.castai_grpc_url
# Block until the cluster reports ready in CAST AI before finishing apply.
wait_for_cluster_ready = true

aws_account_id = data.aws_caller_identity.current.account_id
aws_cluster_region = var.cluster_region
aws_cluster_name = module.eks.cluster_name

# Cross-account role created by the IAM module above.
aws_assume_role_arn = module.castai-eks-role-iam.role_arn
delete_nodes_on_disconnect = var.delete_nodes_on_disconnect

# Self-reference picks the "default" configuration defined just below.
default_node_configuration = module.castai-eks-cluster.castai_node_configurations["default"]

node_configurations = {
default = {
subnets = module.vpc.private_subnets
tags = var.tags
security_groups = [
module.eks.cluster_security_group_id,
module.eks.node_security_group_id,
aws_security_group.additional.id,
]
# Instance profile attached to CAST AI provisioned nodes.
instance_profile_arn = module.castai-eks-role-iam.instance_profile_arn
}
}

// depends_on helps Terraform with creating proper dependencies graph in case of resource creation and in this case destroy.
// module "castai-eks-cluster" has to be destroyed before module "castai-eks-role-iam".
depends_on = [module.castai-eks-role-iam]
}
74 changes: 74 additions & 0 deletions examples/eks/eks_cluster_access_entries/eks.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
# 2. Create EKS cluster.
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "20.8.3"
# Required acknowledgement flag of the terraform-aws-modules/eks module.
putin_khuylo = true

cluster_name = var.cluster_name
cluster_version = var.cluster_version
cluster_endpoint_public_access = true

# Core managed add-ons, each tracking the latest available version.
cluster_addons = {
coredns = {
most_recent = true
}
kube-proxy = {
most_recent = true
}
vpc-cni = {
most_recent = true
}
}

vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets

# Enables EKS access entries alongside the aws-auth ConfigMap, matching the
# README note ("EKS API and ConfigMap").
authentication_mode = "API_AND_CONFIG_MAP"

self_managed_node_groups = {
node_group_1 = {
name = "${var.cluster_name}-ng-1"
instance_type = "m5.large"
max_size = 5
min_size = 2
desired_size = 2
}
}

# Spot-backed managed node group used as baseline capacity.
eks_managed_node_groups = {
node_group_spot = {
name = "${var.cluster_name}-spot"
min_size = 1
max_size = 10
desired_size = 1

instance_types = ["t3.large"]
capacity_type = "SPOT"

update_config = {
max_unavailable_percentage = 50 # or set `max_unavailable`
}
}
}
}

# Example additional security group attached to CAST AI nodes (see
# node_configurations in castai.tf); allows SSH from private 10.0.0.0/8 only.
resource "aws_security_group" "additional" {
name_prefix = "${var.cluster_name}-additional"
vpc_id = module.vpc.vpc_id

ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"10.0.0.0/8",
]
}
}

# EKS access entry granting the CAST AI node instance-profile role access to
# the cluster, so CAST AI provisioned nodes can join (type EC2_LINUX is the
# access-entry type for Linux EC2 worker-node roles).
resource "aws_eks_access_entry" "access_entry" {
  # Reference the module output rather than var.cluster_name so Terraform has
  # an implicit dependency on the EKS cluster: the access entry is created
  # only after the cluster exists and destroyed before it. The resolved name
  # is identical (the module is created with cluster_name = var.cluster_name).
  cluster_name  = module.eks.cluster_name
  principal_arn = module.castai-eks-role-iam.instance_profile_role_arn
  type          = "EC2_LINUX"
}
15 changes: 15 additions & 0 deletions examples/eks/eks_cluster_access_entries/providers.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Following providers required by EKS and VPC modules.
provider "aws" {
region = var.cluster_region
}

# Kubernetes provider authenticated with a short-lived `aws eks get-token`
# credential, mirroring the helm provider configuration in castai.tf.
provider "kubernetes" {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
exec {
api_version = "client.authentication.k8s.io/v1beta1"
command = "aws"
# This requires the awscli to be installed locally where Terraform is executed
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name, "--region", var.cluster_region]
}
}
3 changes: 3 additions & 0 deletions examples/eks/eks_cluster_access_entries/tf.vars.example
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
cluster_name = "<place-holder>"
cluster_region = "<place-holder>"
castai_api_token = "<place-holder>"
46 changes: 46 additions & 0 deletions examples/eks/eks_cluster_access_entries/variables.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
# EKS module variables.
variable "cluster_name" {
type = string
description = "EKS cluster name in AWS account."
}

variable "cluster_version" {
  type = string
  # Fixed typo in description: "EKS cluster name version." -> "EKS cluster version."
  description = "EKS cluster version."
  default = "1.27"
}

variable "cluster_region" {
type = string
description = "AWS Region in which EKS cluster and supporting resources will be created."
}

variable "castai_api_url" {
type = string
description = "URL of alternative CAST AI API to be used during development or testing"
default = "https://api.cast.ai"
}

# Variables required for connecting EKS cluster to CAST AI.
variable "castai_api_token" {
type = string
description = "CAST AI API token created in console.cast.ai API Access keys section"
}

variable "castai_grpc_url" {
type = string
description = "CAST AI gRPC URL"
default = "grpc.cast.ai:443"
}

variable "delete_nodes_on_disconnect" {
type = bool
description = "Optional parameter, if set to true - CAST AI provisioned nodes will be deleted from cloud on cluster disconnection. For production use it is recommended to set it to false."
default = true
}

variable "tags" {
type = map(any)
description = "Optional tags for new cluster nodes. This parameter applies only to new nodes - tags for old nodes are not reconciled."
default = {}
}
17 changes: 17 additions & 0 deletions examples/eks/eks_cluster_access_entries/versions.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# Provider requirements for this example.
# NOTE(review): no provider version constraints are pinned here; consider
# adding `version = "~> x.y"` entries for reproducible applies — TODO confirm
# against the versions this example was tested with.
terraform {
required_providers {
castai = {
source = "castai/castai"
}
kubernetes = {
source = "hashicorp/kubernetes"
}
helm = {
source = "hashicorp/helm"
}
aws = {
source = "hashicorp/aws"
}
}
required_version = ">= 0.13"
}
32 changes: 32 additions & 0 deletions examples/eks/eks_cluster_access_entries/vpc.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# 1. Create VPC.
data "aws_availability_zones" "available" {}

module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "5.0.0"

name = var.cluster_name
cidr = "10.0.0.0/16"

azs = data.aws_availability_zones.available.names
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]

# Single shared NAT gateway keeps example costs down (not HA across AZs).
enable_nat_gateway = true
single_nat_gateway = true
one_nat_gateway_per_az = false

tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
}

# Role tags let Kubernetes place public/internal load balancers in the
# correct subnets.
public_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/elb" = 1
}

private_subnet_tags = {
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = 1
}
}

0 comments on commit e3a6448

Please sign in to comment.