diff --git a/Dockerfile b/Dockerfile
index dbd6d75..ab789c8 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,12 +1,12 @@
# Copyright © 2021-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
-ARG TERRAFORM_VERSION=1.8.5
-ARG GCP_CLI_VERSION=479.0.0
+ARG TERRAFORM_VERSION=1.9.6
+ARG GCP_CLI_VERSION=496.0.0
-FROM hashicorp/terraform:$TERRAFORM_VERSION as terraform
+FROM hashicorp/terraform:$TERRAFORM_VERSION AS terraform
FROM google/cloud-sdk:$GCP_CLI_VERSION-alpine
-ARG KUBECTL_VERSION=1.29.7
+ARG KUBECTL_VERSION=1.30.6
ARG ENABLE_GKE_GCLOUD_AUTH_PLUGIN=True
ARG INSTALL_COMPONENTS=""
@@ -18,14 +18,14 @@ COPY . .
RUN apk update \
&& apk upgrade --no-cache \
&& apk add --no-cache jq \
- && curl -sLO https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl \
+ && curl -sLO https://dl.k8s.io/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl \
&& chmod 755 ./kubectl /viya4-iac-gcp/docker-entrypoint.sh \
&& mv ./kubectl /usr/local/bin/kubectl \
- && chmod g=u -R /etc/passwd /etc/group /viya4-iac-gcp \
&& git config --system --add safe.directory /viya4-iac-gcp \
&& terraform init \
&& gcloud components install gke-gcloud-auth-plugin alpha beta cloud-sql-proxy $INSTALL_COMPONENTS \
- && rm -rf /google-cloud-sdk/.install/.backup
+ && rm -rf /google-cloud-sdk/.install/.backup \
+ && chmod g=u -R /etc/passwd /etc/group /viya4-iac-gcp
ENV TF_VAR_iac_tooling=docker
ENV USE_GKE_GCLOUD_AUTH_PLUGIN=$ENABLE_GKE_GCLOUD_AUTH_PLUGIN
diff --git a/README.md b/README.md
index e6111d0..a4d06af 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ This project contains Terraform scripts to provision Google Cloud infrastructure
>- Managed Google Kubernetes Engine (GKE) cluster
>- System and User GKE Node pools with required Labels and Taints
>- Infrastructure to deploy SAS Viya platform CAS in SMP or MPP mode
- >- Shared Storage options for SAS Viya platform - Google Filestore (ha) or NFS Server (standard)
+ >- Shared Storage options for SAS Viya platform - Google Filestore (ha), Google NetApp Volumes (ha) or NFS Server (standard)
>- Google Cloud SQL for PostgreSQL instance, optional
[](./docs/images/viya4-iac-gcp-diag.png?raw=true)
@@ -31,10 +31,10 @@ Operational knowledge of
- Terraform or Docker
- #### Terraform
- - [Terraform](https://www.terraform.io/downloads.html) - v1.8.5
- - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl) - v1.29.7
- - [jq](https://stedolan.github.io/jq/) - v1.6
- - [gcloud CLI](https://cloud.google.com/sdk/gcloud) - (optional - useful as an alternative to the Google Cloud Platform Portal) - v479.0.0
+ - [Terraform](https://www.terraform.io/downloads.html) - v1.9.6
+ - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl) - v1.30.6
+ - [jq](https://stedolan.github.io/jq/) - v1.7
+ - [gcloud CLI](https://cloud.google.com/sdk/gcloud) - (optional - useful as an alternative to the Google Cloud Platform Portal) - v496.0.0
- [gke-gcloud-auth-plugin](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#install_plugin) - (optional - only for provider based Kubernetes configuration files) - >= v1.26
- #### Docker
- [Docker](https://docs.docker.com/get-docker/)
diff --git a/config/sample-input-tf-enterprise.tfvars b/config/sample-input-tf-enterprise.tfvars
index 7ec4e52..599afd2 100644
--- a/config/sample-input-tf-enterprise.tfvars
+++ b/config/sample-input-tf-enterprise.tfvars
@@ -38,7 +38,7 @@ create_static_kubeconfig = true
# tags = {} # e.g., { "key1" = "value1", "key2" = "value2" }
# GKE config
-kubernetes_version = "1.29"
+kubernetes_version = "1.30"
default_nodepool_min_nodes = 1
default_nodepool_vm_type = "n2-highmem-8"
diff --git a/docs/CONFIG-VARS.md b/docs/CONFIG-VARS.md
index 9f80726..50b2c53 100644
--- a/docs/CONFIG-VARS.md
+++ b/docs/CONFIG-VARS.md
@@ -17,7 +17,8 @@ Supported configuration variables are listed in the table below. All variables
- [Additional Nodepools](#additional-nodepools)
- [Storage](#storage)
- [For `storage_type=standard` only (NFS server VM)](#for-storage_typestandard-only-nfs-server-vm)
- - [For `storage_type=ha` only (Google Filestore)](#for-storage_typeha-only-google-filestore)
+ - [For `storage_type=ha` with Google Filestore](#for-storage_typeha-with-google-filestore)
+ - [For `storage_type=ha` with Google NetApp Volumes](#for-storage_typeha-with-google-netapp-volumes)
- [Google Artifact Registry (GAR) and Google Container Registry (GCR)](#google-artifact-registry-gar-and-google-container-registry-gcr)
- [Postgres Servers](#postgres-servers)
- [Monitoring](#monitoring)
@@ -65,10 +66,11 @@ You can use `default_public_access_cidrs` to set a default range for all created
| gke_subnet_cidr | Address space for the subnet for the GKE resources | string | "192.168.0.0/23" | This variable is ignored when `vpc_name` is set (aka bring your own vnet) |
| gke_pod_subnet_cidr | Secondary address space in the GKE subnet for Kubernetes Pods | string | "10.0.0.0/17" | This variable is ignored when `subnet_names` is set (aka bring your own subnets) |
| gke_service_subnet_cidr | Secondary address space in the GKE subnet for Kubernetes Services | string | "10.1.0.0/22" | This variable is ignored when `subnet_names` is set (aka bring your own subnets) |
-| gke_control_plane_subnet_cidr | Address space for the hosted master subnet | string | "10.2.0.0/28" | When providing your own subnets (by setting `subnet_names` make sure your subnets do not overlap this range |
+| gke_control_plane_subnet_cidr | Address space for the hosted primary subnet | string | "10.2.0.0/28" | When providing your own subnets (by setting `subnet_names`) make sure your subnets do not overlap this range |
| misc_subnet_cidr | Address space for the the auxiliary resources (Jump VM and optionally NFS VM) subnet | string | "192.168.2.0/24" | This variable is ignored when `subnet_names` is set (aka bring your own subnet) |
| filestore_subnet_cidr | Address space for Google Filestore subnet | string | "192.168.3.0/29" | Needs to be at least a /29 range. Only used when `storage_type="ha"` |
| database_subnet_cidr | Address space for Google Cloud SQL Postgres subnet | string | "192.168.4.0/23" | Only used with external postgres |
+| netapp_subnet_cidr | Address space for Google Cloud NetApp Volumes subnet | string | "192.168.5.0/24" | Needs to be at least a /24 range. Only used when `storage_type="ha"` and `storage_type_backend="netapp"` |
### Use Existing
@@ -212,6 +214,7 @@ stateful = {
| Name | Description | Type | Default | Notes |
| :--- | ---: | ---: | ---: | ---: |
| storage_type | Type of Storage. Valid Values: "standard", "ha" | string | "standard" | "standard" creates NFS server VM, "ha" Google Filestore instance |
+| storage_type_backend | The storage backend for the chosen `storage_type`. | string | If `storage_type=standard` the default is "nfs"; If `storage_type=ha` the default is "filestore" | Valid Values: "nfs" if `storage_type=standard`; "filestore" or "netapp" if `storage_type=ha` |
### For `storage_type=standard` only (NFS server VM)
@@ -221,13 +224,27 @@ stateful = {
| nfs_vm_admin | OS Admin User for the NFS server VM | string | "nfsuser" | The NFS server VM is only created when storage_type="standard" |
| nfs_raid_disk_size | Size in Gb for each disk of the RAID5 cluster on the NFS server VM | number | 1000 | The NFS server VM is only created when storage_type="standard" |
-### For `storage_type=ha` only (Google Filestore)
+### For `storage_type=ha` with Google Filestore
| Name | Description | Type | Default | Notes |
| :--- | ---: | ---: | ---: | ---: |
| filestore_tier | The service tier for the Google Filestore Instance | string | "BASIC_HDD" | Valid Values: "BASIC_HDD", "BASIC_SSD" (previously called "STANDARD" and "PREMIUM" respectively.) |
| filestore_size_in_gb | Size in GB of Filesystem in the Google Filestore Instance | number | 1024 for BASIC_HDD, 2560 for BASIC_SDD | 2560 GB is the minimum size for the BASIC_SSD tier. The BASIC_HDD tier allows a minimum size of 1024 GB. |
+### For `storage_type=ha` with Google NetApp Volumes
+
+When `storage_type=ha` and `storage_type_backend=netapp` are specified, the [Google NetApp Volumes](https://cloud.google.com/netapp/volumes/docs/discover/overview) service is created. Before using this storage option:
+- Enable the Google Cloud NetApp Volumes API for your project, see how to enable [here](https://cloud.google.com/netapp/volumes/docs/get-started/configure-access/initiate-console-settings#enable_the_api).
+- Grant access to NetApp Volumes operations by granting IAM roles to users. The two predefined roles are `roles/netapp.admin` and `roles/netapp.viewer`. You can assign these roles to specific users or service accounts.
+- NetApp Volumes is available in several regions. For details about region availability, see [NetApp Volumes locations](https://cloud.google.com/netapp/volumes/docs/locations).
+
+| Name | Description | Type | Default | Notes |
+| :--- | ---: | ---: | ---: | ---: |
+| netapp_service_level | The service level of the storage pool. | string | "PREMIUM" | Valid Values are: PREMIUM, EXTREME, STANDARD, FLEX. |
+| netapp_protocols | The target volume protocol expressed as a list. | list(string) | ["NFSV3"] | Each value may be one of: NFSV3, NFSV4, SMB. Currently, only NFSV3 is supported by SAS Viya Platform. |
+| netapp_capacity_gib | Capacity of the storage pool (in GiB). Storage Pool capacity specified must be between 2048 GiB and 10485760 GiB. | string | "2048" | |
+| netapp_volume_path | A unique file path for the volume. Used when creating mount targets. Needs to be unique per location.| string | "export" | |
+
## Google Artifact Registry (GAR) and Google Container Registry (GCR)
| Name | Description | Type | Default | Notes |
@@ -267,8 +284,8 @@ Each server element, like `foo = {}`, can contain none, some, or all of the para
| administrator_password | The Password associated with the administrator_login for the PostgreSQL Server | string | "my$up3rS3cretPassw0rd" | |
| server_version | The version of the PostgreSQL server instance | string | "15" | Refer to the [SAS Viya Platform Administration Guide](https://documentation.sas.com/?cdcId=sasadmincdc&cdcVersion=default&docsetId=itopssr&docsetTarget=p05lfgkwib3zxbn1t6nyihexp12n.htm#p1wq8ouke3c6ixn1la636df9oa1u) for the supported versions of PostgreSQL for the SAS Viya platform. |
| ssl_enforcement_enabled | Enforce SSL on connection to the PostgreSQL database | bool | true | |
-| availability_type | The availability type for the master instance. | string | "ZONAL" | This is only used to set up high availability for the PostgreSQL instance. Can be either `ZONAL` or `REGIONAL`. |
-| database_flags | Database flags for the master instance. | list(object({})) | | More details can be found [here](https://cloud.google.com/sql/docs/postgres/flags) |
+| availability_type | The availability type for the primary instance. | string | "ZONAL" | This is only used to set up high availability for the PostgreSQL instance. Can be either `ZONAL` or `REGIONAL`. |
+| database_flags | Database flags for the primary instance. | list(object({})) | | More details can be found [here](https://cloud.google.com/sql/docs/postgres/flags) |
Multiple SAS offerings require a second PostgreSQL instance referred to as SAS Common Data Store, or CDS PostgreSQL. For more information, see [Common Customizations](https://documentation.sas.com/?cdcId=itopscdc&cdcVersion=default&docsetId=dplyml0phy0dkr&docsetTarget=n08u2yg8tdkb4jn18u8zsi6yfv3d.htm#p0wkxxi9s38zbzn19ukjjaxsc0kl). A list of SAS offerings that require CDS PostgreSQL is provided in [SAS Common Data Store Requirements](https://documentation.sas.com/?cdcId=itopscdc&cdcVersion=default&docsetId=itopssr&docsetTarget=p05lfgkwib3zxbn1t6nyihexp12n.htm#n03wzanutmc6gon1val5fykas9aa). To create and configure an external CDS PostgreSQL instance in addition to the external platform PostgreSQL instance named `default`, specify `cds-postgres` as a second PostgreSQL instance, as shown in the example below.
@@ -306,4 +323,4 @@ postgres_servers = {
| gke_monitoring_enabled_components | List of services to monitor: SYSTEM_COMPONENTS, WORKLOADS (WORKLOADS deprecated in 1.24). | list of strings | ["SYSTEM_COMPONENTS"] | |
| enable_managed_prometheus | Enable Google Cloud [Managed Service for Prometheus](https://cloud.google.com/stackdriver/docs/managed-prometheus) for your cluster | boolean | false | |
-Note: For additional details about Google Kubernetes Engine (GKE) integration with Cloud Logging and Cloud Monitoring, including Google Cloud [Managed Service for Prometheus](https://cloud.google.com/stackdriver/docs/managed-prometheus), view the ["Overview of Google Cloud's operations suite for GKE" documentation](https://cloud.google.com/stackdriver/docs/solutions/gke)
\ No newline at end of file
+Note: For additional details about Google Kubernetes Engine (GKE) integration with Cloud Logging and Cloud Monitoring, including Google Cloud [Managed Service for Prometheus](https://cloud.google.com/stackdriver/docs/managed-prometheus), view the ["Overview of Google Cloud's operations suite for GKE" documentation](https://cloud.google.com/stackdriver/docs/solutions/gke)
\ No newline at end of file
diff --git a/docs/user/TerraformGCPAuthentication.md b/docs/user/TerraformGCPAuthentication.md
index 441cd12..0a71727 100644
--- a/docs/user/TerraformGCPAuthentication.md
+++ b/docs/user/TerraformGCPAuthentication.md
@@ -39,7 +39,9 @@ The Service Account will need the following [IAM roles](https://cloud.google.com
| `roles/container.admin` | Kubernetes Engine Admin | Cluster creation |
| `roles/container.clusterAdmin` | Kubernetes Engine Cluster Admin | Terraform Kubernetes Engine Module |
| `roles/container.developer` | Kubernetes Engine Developer | Cluster creation |
-| `roles/file.editor` | Cloud Filestore Editor | Needed for [`storage_type=="HA"`](../CONFIG-VARS.md#storage) |
+| `roles/file.editor` | Cloud Filestore Editor | Needed for [`storage_type=="ha" && storage_type_backend = "filestore"`](../CONFIG-VARS.md#storage) |
+| `roles/netapp.admin` | NetApp Admin | Needed for [`storage_type=="ha" && storage_type_backend = "netapp"`](../CONFIG-VARS.md#storage) |
+| `roles/netapp.viewer` | NetApp Viewer | Needed for [`storage_type=="ha" && storage_type_backend = "netapp"`](../CONFIG-VARS.md#storage) |
| `roles/iam.serviceAccountAdmin` | Service Account Admin | Terraform Kubernetes Engine Module |
| `roles/iam.serviceAccountUser` | Service Account User | Terraform Kubernetes Engine Module |
| `roles/resourcemanager.projectIamAdmin` | Project IAM Admin | Terraform Kubernetes Engine Module |
@@ -59,6 +61,8 @@ gcloud projects add-iam-policy-binding $PROJECT --member serviceAccount:${SA_NAM
gcloud projects add-iam-policy-binding $PROJECT --member serviceAccount:${SA_NAME}@${PROJECT}.iam.gserviceaccount.com --role roles/container.clusterAdmin
gcloud projects add-iam-policy-binding $PROJECT --member serviceAccount:${SA_NAME}@${PROJECT}.iam.gserviceaccount.com --role roles/container.developer
gcloud projects add-iam-policy-binding $PROJECT --member serviceAccount:${SA_NAME}@${PROJECT}.iam.gserviceaccount.com --role roles/file.editor
+gcloud projects add-iam-policy-binding $PROJECT --member serviceAccount:${SA_NAME}@${PROJECT}.iam.gserviceaccount.com --role roles/netapp.admin
+gcloud projects add-iam-policy-binding $PROJECT --member serviceAccount:${SA_NAME}@${PROJECT}.iam.gserviceaccount.com --role roles/netapp.viewer
gcloud projects add-iam-policy-binding $PROJECT --member serviceAccount:${SA_NAME}@${PROJECT}.iam.gserviceaccount.com --role roles/iam.serviceAccountAdmin
gcloud projects add-iam-policy-binding $PROJECT --member serviceAccount:${SA_NAME}@${PROJECT}.iam.gserviceaccount.com --role roles/iam.serviceAccountUser
gcloud projects add-iam-policy-binding $PROJECT --member serviceAccount:${SA_NAME}@${PROJECT}.iam.gserviceaccount.com --role roles/resourcemanager.projectIamAdmin
diff --git a/examples/sample-input-byo.tfvars b/examples/sample-input-byo.tfvars
index efb1fad..b17b7a1 100644
--- a/examples/sample-input-byo.tfvars
+++ b/examples/sample-input-byo.tfvars
@@ -37,7 +37,7 @@ postgres_servers = {
}
# GKE config
-kubernetes_version = "1.29"
+kubernetes_version = "1.30"
default_nodepool_min_nodes = 2
default_nodepool_vm_type = "n2-highmem-8"
diff --git a/examples/sample-input-connect.tfvars b/examples/sample-input-connect.tfvars
index 32fb390..897c65f 100644
--- a/examples/sample-input-connect.tfvars
+++ b/examples/sample-input-connect.tfvars
@@ -27,7 +27,7 @@ postgres_servers = {
}
# GKE config
-kubernetes_version = "1.29"
+kubernetes_version = "1.30"
default_nodepool_min_nodes = 2
default_nodepool_vm_type = "n2-highmem-8"
diff --git a/examples/sample-input-ha.tfvars b/examples/sample-input-ha.tfvars
index 7c3f57a..8576eed 100644
--- a/examples/sample-input-ha.tfvars
+++ b/examples/sample-input-ha.tfvars
@@ -27,7 +27,7 @@ postgres_servers = {
}
# GKE config
-kubernetes_version = "1.29"
+kubernetes_version = "1.30"
default_nodepool_min_nodes = 2
default_nodepool_vm_type = "n2-highmem-8"
@@ -94,5 +94,6 @@ jump_vm_admin = "jumpuser"
# Storage for Viya Compute Services
# Supported storage_type values
# "standard" - Custom managed NFS Server VM and disks
-# "ha" - Google Filestore
+# "ha" - Google Filestore or Google NetApp Volumes
storage_type = "ha"
+storage_type_backend = "filestore" # "filestore" is the default, use "netapp" to create Google NetApp Volumes
diff --git a/examples/sample-input-minimal.tfvars b/examples/sample-input-minimal.tfvars
index e19de38..24f8898 100644
--- a/examples/sample-input-minimal.tfvars
+++ b/examples/sample-input-minimal.tfvars
@@ -27,7 +27,7 @@ tags = {} # e.g., { "key1" = "value1", "key2" = "value2" }
# }
# GKE config
-kubernetes_version = "1.29"
+kubernetes_version = "1.30"
default_nodepool_min_nodes = 1
default_nodepool_vm_type = "n2-highmem-8"
diff --git a/examples/sample-input.tfvars b/examples/sample-input.tfvars
index 0b4c587..9e3d089 100644
--- a/examples/sample-input.tfvars
+++ b/examples/sample-input.tfvars
@@ -27,7 +27,7 @@ postgres_servers = {
}
# GKE config
-kubernetes_version = "1.29"
+kubernetes_version = "1.30"
default_nodepool_min_nodes = 2
default_nodepool_vm_type = "n2-highmem-8"
diff --git a/linting-configs/.tflint.hcl b/linting-configs/.tflint.hcl
index 2fa8567..ffac0eb 100644
--- a/linting-configs/.tflint.hcl
+++ b/linting-configs/.tflint.hcl
@@ -8,7 +8,7 @@
config {
# Enables module inspection.
- module = true
+ call_module_type = "all"
}
plugin "google" {
diff --git a/locals.tf b/locals.tf
index 763bb18..e184431 100644
--- a/locals.tf
+++ b/locals.tf
@@ -25,6 +25,12 @@ locals {
: null
)
+ # Storage
+ storage_type_backend = (var.storage_type == "none" ? "none"
+ : var.storage_type == "standard" ? "nfs"
+ : var.storage_type == "ha" && var.storage_type_backend == "netapp" ? "netapp"
+ : var.storage_type == "ha" ? "filestore" : "none")
+
# Kubernetes
kubeconfig_path = var.iac_tooling == "docker" ? "/workspace/${var.prefix}-gke-kubeconfig.conf" : "${var.prefix}-gke-kubeconfig.conf"
diff --git a/main.tf b/main.tf
index 5b8f47c..5b232a8 100644
--- a/main.tf
+++ b/main.tf
@@ -66,7 +66,7 @@ EOT
resource "google_filestore_instance" "rwx" {
name = "${var.prefix}-rwx-filestore"
- count = var.storage_type == "ha" ? 1 : 0
+ count = var.storage_type == "ha" && local.storage_type_backend == "filestore" ? 1 : 0
tier = upper(var.filestore_tier)
location = local.zone
labels = var.tags
@@ -91,7 +91,7 @@ data "google_container_engine_versions" "gke-version" {
module "gke" {
source = "terraform-google-modules/kubernetes-engine/google//modules/private-cluster"
- version = "~> 31.0.0"
+ version = "~> 33.1.0"
project_id = var.project
name = "${var.prefix}-gke"
region = local.region
@@ -241,7 +241,7 @@ resource "local_file" "kubeconfig" {
# Module Registry - https://registry.terraform.io/modules/GoogleCloudPlatform/sql-db/google/12.0.0/submodules/postgresql
module "postgresql" {
source = "GoogleCloudPlatform/sql-db/google//modules/postgresql"
- version = "~> 20.1.0"
+ version = "~> 22.1.0"
project_id = var.project
for_each = local.postgres_servers != null ? length(local.postgres_servers) != 0 ? local.postgres_servers : {} : {}
@@ -293,7 +293,7 @@ module "postgresql" {
module "sql_proxy_sa" {
source = "terraform-google-modules/service-accounts/google"
- version = "~> 4.2.2"
+ version = "~> 4.4.0"
count = var.postgres_servers != null ? length(var.postgres_servers) != 0 ? 1 : 0 : 0
project_id = var.project
prefix = var.prefix
@@ -301,3 +301,19 @@ module "sql_proxy_sa" {
project_roles = ["${var.project}=>roles/cloudsql.admin"]
display_name = "IAC-managed service account for cluster ${var.prefix} and sql-proxy integration."
}
+
+module "google_netapp" {
+ source = "./modules/google_netapp"
+
+ count = var.storage_type == "ha" && local.storage_type_backend == "netapp" ? 1 : 0
+
+ prefix = var.prefix
+ region = local.region
+ network = module.vpc.network_name
+ netapp_subnet_cidr = var.netapp_subnet_cidr
+ service_level = var.netapp_service_level
+ capacity_gib = var.netapp_capacity_gib
+ protocols = var.netapp_protocols
+ volume_path = "${var.prefix}-${var.netapp_volume_path}"
+ allowed_clients = join(",", [local.gke_subnet_cidr, local.misc_subnet_cidr])
+}
diff --git a/modules/google_netapp/main.tf b/modules/google_netapp/main.tf
new file mode 100644
index 0000000..3ec9c3b
--- /dev/null
+++ b/modules/google_netapp/main.tf
@@ -0,0 +1,70 @@
+# Copyright © 2021-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Terraform Registry : https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/netapp_volume
+# GitHub Repository : https://github.com/terraform-google-modules
+#
+
+# Reserve compute address CIDR for NetApp Volumes to use
+resource "google_compute_global_address" "private_ip_alloc" {
+ name = "${var.network}-ip-range"
+ purpose = "VPC_PEERING"
+ address_type = "INTERNAL"
+ address = split("/", var.netapp_subnet_cidr)[0]
+ prefix_length = split("/", var.netapp_subnet_cidr)[1]
+ network = var.network
+}
+
+# Create the PSA peering
+resource "google_service_networking_connection" "default" {
+ network = var.network
+ service = "netapp.servicenetworking.goog"
+ reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name]
+
+ deletion_policy = "ABANDON"
+}
+
+# Modify the PSA Connection to allow import/export of custom routes
+resource "google_compute_network_peering_routes_config" "route_updates" {
+ peering = google_service_networking_connection.default.peering
+ network = var.network
+
+ import_custom_routes = true
+ export_custom_routes = true
+}
+
+resource "google_netapp_storage_pool" "netapp-tf-pool" {
+ name = "${var.prefix}-netapp-storage-pool"
+ location = var.region
+ service_level = var.service_level
+ capacity_gib = var.capacity_gib
+ network = var.network
+
+ lifecycle {
+ ignore_changes = [network]
+ }
+}
+
+resource "google_netapp_volume" "netapp-nfs-volume" {
+ location = var.region
+ name = "${var.prefix}-netapp-volume"
+ capacity_gib = var.capacity_gib # Size can be up to space available in pool
+ share_name = var.volume_path
+ storage_pool = google_netapp_storage_pool.netapp-tf-pool.name
+ protocols = var.protocols
+ unix_permissions = "0777"
+ export_policy {
+ rules {
+ access_type = "READ_WRITE"
+ allowed_clients = var.allowed_clients
+ has_root_access = true
+ nfsv3 = contains(var.protocols, "NFSV3") ? true : false
+ nfsv4 = contains(var.protocols, "NFSV4") ? true : false
+ }
+ }
+
+ depends_on = [
+ google_netapp_storage_pool.netapp-tf-pool,
+ google_service_networking_connection.default
+ ]
+}
diff --git a/modules/google_netapp/outputs.tf b/modules/google_netapp/outputs.tf
new file mode 100644
index 0000000..71fc2df
--- /dev/null
+++ b/modules/google_netapp/outputs.tf
@@ -0,0 +1,7 @@
+output "mountpath" {
+ value = google_netapp_volume.netapp-nfs-volume.mount_options[0].export
+}
+
+output "export_ip" {
+ value = split(":", google_netapp_volume.netapp-nfs-volume.mount_options[0].export_full)[0]
+}
diff --git a/modules/google_netapp/variables.tf b/modules/google_netapp/variables.tf
new file mode 100644
index 0000000..18ed8ba
--- /dev/null
+++ b/modules/google_netapp/variables.tf
@@ -0,0 +1,54 @@
+# Copyright © 2021-2024, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+variable "prefix" {
+ description = "A prefix used in the name for all cloud resources created by this script. The prefix string must start with lowercase letter and contain only lowercase alphanumeric characters and hyphen or dash(-), but can not start or end with '-'."
+ type = string
+}
+
+variable "region" {
+ description = "The region to create the VM in"
+ type = string
+}
+
+variable "service_level" {
+ description = "Service level of the storage pool. Possible values are: PREMIUM, EXTREME, STANDARD, FLEX."
+ type = string
+ default = "PREMIUM"
+}
+
+variable "protocols" {
+ description = "The target volume protocol expressed as a list. Allowed combinations are ['NFSV3'], ['NFSV4'], ['SMB'], ['NFSV3', 'NFSV4'], ['SMB', 'NFSV3'] and ['SMB', 'NFSV4']. Each value may be one of: NFSV3, NFSV4, SMB."
+ type = list(string)
+ default = ["NFSV3"]
+}
+
+variable "capacity_gib" {
+ description = "Capacity of the storage pool (in GiB)."
+ type = string
+ default = 2048
+}
+
+variable "volume_path" {
+ description = "A unique file path for the volume. Used when creating mount targets. Needs to be unique per location."
+ type = string
+ default = "export"
+}
+
+variable "network" {
+ description = "VPC network name with format: `projects/{{project}}/global/networks/{{network}}`"
+ type = string
+}
+
+
+variable "allowed_clients" {
+ description = "CIDR blocks allowed to mount nfs exports"
+ type = string
+ default = "0.0.0.0/0"
+}
+
+variable "netapp_subnet_cidr" {
+ description = "Address space for Google Cloud NetApp Volumes subnet"
+ type = string
+ default = "192.168.5.0/24"
+}
diff --git a/modules/google_vm/main.tf b/modules/google_vm/main.tf
index 21be6e9..f66cd92 100755
--- a/modules/google_vm/main.tf
+++ b/modules/google_vm/main.tf
@@ -3,7 +3,7 @@
module "address" {
source = "terraform-google-modules/address/google"
- version = "~> 4.0.0"
+ version = "~> 4.1.0"
project_id = var.project
region = var.region
address_type = "EXTERNAL"
diff --git a/network.tf b/network.tf
index 511accc..939449b 100644
--- a/network.tf
+++ b/network.tf
@@ -11,7 +11,7 @@ data "google_compute_address" "nat_address" {
module "nat_address" {
count = length(var.nat_address_name) == 0 ? 1 : 0
source = "terraform-google-modules/address/google"
- version = "~> 4.0.0"
+ version = "~> 4.1.0"
project_id = var.project
region = local.region
address_type = "EXTERNAL"
@@ -23,7 +23,7 @@ module "nat_address" {
module "cloud_nat" {
count = length(var.nat_address_name) == 0 ? 1 : 0
source = "terraform-google-modules/cloud-nat/google"
- version = "~> 5.1.0"
+ version = "~> 5.3.0"
project_id = var.project
name = "${var.prefix}-cloud-nat"
region = local.region
@@ -72,7 +72,7 @@ resource "google_service_networking_connection" "private_vpc_connection" {
# required as of hashicorp/google v5.12.0 when using google_service_networking_connection in
# conjunction with CloudSQL instances in order to cleanly delete resources
# https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/service_networking_connection
- deletion_policy = "ABANDON"
+ deletion_policy = "ABANDON"
}
resource "google_compute_firewall" "nfs_vm_cluster_firewall" {
diff --git a/outputs.tf b/outputs.tf
index aa95b10..6493a77 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -27,7 +27,8 @@ output "rwx_filestore_endpoint" {
description = "Shared Storage private IP"
value = (var.storage_type == "none"
? null
- : var.storage_type == "ha" ? google_filestore_instance.rwx[0].networks[0].ip_addresses[0] : module.nfs_server[0].private_ip
+ : var.storage_type == "ha" && local.storage_type_backend == "filestore" ? google_filestore_instance.rwx[0].networks[0].ip_addresses[0]
+ : var.storage_type == "ha" && local.storage_type_backend == "netapp" ? module.google_netapp[0].export_ip : module.nfs_server[0].private_ip
)
}
@@ -35,7 +36,8 @@ output "rwx_filestore_path" {
description = "Shared Storage mount path"
value = (var.storage_type == "none"
? null
- : var.storage_type == "ha" ? "/${google_filestore_instance.rwx[0].file_shares[0].name}" : "/export"
+ : var.storage_type == "ha" && local.storage_type_backend == "filestore" ? "/${google_filestore_instance.rwx[0].file_shares[0].name}"
+ : var.storage_type == "ha" && local.storage_type_backend == "netapp" ? module.google_netapp[0].mountpath : "/export"
)
}
diff --git a/variables.tf b/variables.tf
index e50c25c..53ad94b 100644
--- a/variables.tf
+++ b/variables.tf
@@ -12,9 +12,9 @@ variable "prefix" {
variable "location" {
description = <