Feature: allow the use of private networks

xavierleune committed Nov 22, 2024
1 parent d2d4423 commit e187c93

Showing 14 changed files with 175 additions and 33 deletions.
24 changes: 24 additions & 0 deletions README.md
@@ -993,6 +993,30 @@ enable_delete_protection = {
```

</details>
<details>

<summary>Use only private IPs in your cluster</summary>

To use only private IPs in your cluster, you need the following in your project:
1. A network that is already configured.
2. A machine with a public IP and NAT configured (see the [Hetzner guide](https://community.hetzner.com/tutorials/how-to-set-up-nat-for-cloud-networks)).
3. Access to your network (you can use WireGuard, see the [Hetzner guide](https://docs.hetzner.com/cloud/apps/list/wireguard/)).
4. A route in your network with destination `0.0.0.0/0` via the private IP of your NAT machine.
5. Make sure the connection to your VPN is established before running Terraform.

Recommended values (a sketch of how these could be created with Terraform is shown below):
- Network range: `10.0.0.0/8`
- Subnet for your WireGuard and NAT machine: `10.128.0.0/16`
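
If you prefer to manage these prerequisites with Terraform as well, a minimal sketch could look like the following. The resource names, the network zone, and the `10.128.0.2` gateway address are assumptions for illustration only; they are not part of this module:

```hcl
# Hypothetical prerequisite setup, created outside of this module.
resource "hcloud_network" "private" {
  name     = "private-network"
  ip_range = "10.0.0.0/8"
}

# Subnet that will host the WireGuard / NAT machine.
resource "hcloud_network_subnet" "nat" {
  network_id   = hcloud_network.private.id
  type         = "cloud"
  network_zone = "eu-central"
  ip_range     = "10.128.0.0/16"
}

# Route all outbound traffic through the private IP of the NAT machine
# (10.128.0.2 is an assumed address here).
resource "hcloud_network_route" "nat" {
  network_id  = hcloud_network.private.id
  destination = "0.0.0.0/0"
  gateway     = "10.128.0.2"
}
```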

If you use these values, set the following in your kube.tf:
- `existing_network_id = [YOURID]` (with the brackets)
- `network_ipv4_cidr = "10.0.0.0/9"`
- `disable_ipv4 = true` and `disable_ipv6 = true` on every machine in every nodepool (control planes and agents).

This setup is compatible with a load balancer for your control planes; however, you should consider setting
`control_plane_lb_enable_public_interface = false` to keep its IP private, as in the sketch below.
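
Putting it together, an illustrative kube.tf fragment might look like this (the network id and the nodepool shown are placeholders, not a recommended configuration):

```hcl
existing_network_id = [1234567] # your network id, with the brackets
network_ipv4_cidr   = "10.0.0.0/9"

control_plane_nodepools = [
  {
    name        = "control-plane-fsn1",
    server_type = "cx22",
    location    = "fsn1",
    labels      = [],
    taints      = [],
    count       = 1,
    # No public IPs on this node
    disable_ipv4 = true,
    disable_ipv6 = true,
  },
  # ... repeat the same flags on every other control plane and agent nodepool
]

# Keep the control plane load balancer reachable only on the private network.
control_plane_lb_enable_public_interface = false
```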
</details>


## Debugging

21 changes: 16 additions & 5 deletions agents.tf
@@ -29,6 +29,9 @@ module "agents" {
swap_size = each.value.swap_size
zram_size = each.value.zram_size
keep_disk_size = var.keep_disk_agents
disable_ipv4 = each.value.disable_ipv4
disable_ipv6 = each.value.disable_ipv6
network_id = length(var.existing_network_id) > 0 ? var.existing_network_id[0] : 0

private_ipv4 = cidrhost(hcloud_network_subnet.agent[[for i, v in var.agent_nodepools : i if v.name == each.value.nodepool_name][0]].ip_range, each.value.index + 101)

@@ -57,6 +60,14 @@ locals {
var.agent_nodes_custom_config,
(v.selinux == true ? { selinux = true } : {})
) }

agent_ips = {
for k, v in module.agents : k => coalesce(
v.ipv4_address,
v.ipv6_address,
v.private_ipv4_address
)
}
}

resource "null_resource" "agent_config" {
@@ -71,7 +82,7 @@ resource "null_resource" "agent_config" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
host = module.agents[each.key].ipv4_address
host = local.agent_ips[each.key]
port = var.ssh_port
}

@@ -97,7 +108,7 @@ resource "null_resource" "agents" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
host = module.agents[each.key].ipv4_address
host = local.agent_ips[each.key]
port = var.ssh_port
}

@@ -166,7 +177,7 @@ resource "null_resource" "configure_longhorn_volume" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
host = module.agents[each.key].ipv4_address
host = local.agent_ips[each.key]
port = var.ssh_port
}

@@ -215,7 +226,7 @@ resource "null_resource" "configure_floating_ip" {
NM_CONNECTION=$(nmcli -g GENERAL.CONNECTION device show eth0)
nmcli connection modify "$NM_CONNECTION" \
ipv4.method manual \
ipv4.addresses ${hcloud_floating_ip.agents[each.key].ip_address}/32,${module.agents[each.key].ipv4_address}/32 gw4 172.31.1.1 \
ipv4.addresses ${hcloud_floating_ip.agents[each.key].ip_address}/32,${local.agent_ips[each.key]}/32 gw4 172.31.1.1 \
ipv4.route-metric 100 \
&& nmcli connection up "$NM_CONNECTION"
EOT
@@ -226,7 +237,7 @@ resource "null_resource" "configure_floating_ip" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
host = module.agents[each.key].ipv4_address
host = local.agent_ips[each.key]
port = var.ssh_port
}

4 changes: 2 additions & 2 deletions autoscaler-agents.tf
@@ -62,7 +62,7 @@ resource "null_resource" "configure_autoscaler" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
host = module.control_planes[keys(module.control_planes)[0]].ipv4_address
host = coalesce(module.control_planes[keys(module.control_planes)[0]].ipv4_address, module.control_planes[keys(module.control_planes)[0]].ipv6_address, module.control_planes[keys(module.control_planes)[0]].private_ipv4_address)
port = var.ssh_port
}

@@ -165,7 +165,7 @@ resource "null_resource" "autoscaled_nodes_registries" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
host = each.value.ipv4_address
host = coalesce(each.value.ipv4_address, each.value.ipv6_address, one(each.value.network).ip)
port = var.ssh_port
}

17 changes: 14 additions & 3 deletions control_planes.tf
@@ -29,6 +29,9 @@ module "control_planes" {
swap_size = each.value.swap_size
zram_size = each.value.zram_size
keep_disk_size = var.keep_disk_cp
disable_ipv4 = each.value.disable_ipv4
disable_ipv6 = each.value.disable_ipv6
network_id = length(var.existing_network_id) > 0 ? var.existing_network_id[0] : 0

# We leave some room so 100 eventual Hetzner LBs that can be created perfectly safely
# It leaves the subnet with 254 x 254 - 100 = 64416 IPs to use, so probably enough.
@@ -83,6 +86,14 @@ resource "hcloud_load_balancer_service" "control_plane" {
}

locals {
control_plane_ips = {
for k, v in module.control_planes : k => coalesce(
v.ipv4_address,
v.ipv6_address,
v.private_ipv4_address
)
}

k3s-config = { for k, v in local.control_plane_nodes : k => merge(
{
node-name = module.control_planes[k].name
@@ -134,7 +145,7 @@ resource "null_resource" "control_plane_config" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
host = module.control_planes[each.key].ipv4_address
host = local.control_plane_ips[each.key]
port = var.ssh_port
}

@@ -167,7 +178,7 @@ resource "null_resource" "authentication_config" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
host = module.control_planes[each.key].ipv4_address
host = local.control_plane_ips[each.key]
port = var.ssh_port
}

@@ -197,7 +208,7 @@ resource "null_resource" "control_planes" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
host = module.control_planes[each.key].ipv4_address
host = local.control_plane_ips[each.key]
port = var.ssh_port
}

30 changes: 27 additions & 3 deletions init.tf
@@ -19,13 +19,37 @@ resource "hcloud_load_balancer" "cluster" {
}
}

resource "hcloud_load_balancer_network" "cluster" {
count = local.has_external_load_balancer ? 0 : 1

load_balancer_id = hcloud_load_balancer.cluster.*.id[0]
subnet_id = hcloud_network_subnet.agent.*.id[0]
}

resource "hcloud_load_balancer_target" "cluster" {
count = local.has_external_load_balancer ? 0 : 1

depends_on = [hcloud_load_balancer_network.cluster]
type = "label_selector"
load_balancer_id = hcloud_load_balancer.cluster.*.id[0]
label_selector = join(",", [for k, v in merge(local.labels, local.labels_control_plane_node, local.labels_agent_node) : "${k}=${v}"])
use_private_ip = true
}

locals {
first_control_plane_ip = coalesce(
module.control_planes[keys(module.control_planes)[0]].ipv4_address,
module.control_planes[keys(module.control_planes)[0]].ipv6_address,
module.control_planes[keys(module.control_planes)[0]].private_ipv4_address
)
}

resource "null_resource" "first_control_plane" {
connection {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
host = module.control_planes[keys(module.control_planes)[0]].ipv4_address
host = local.first_control_plane_ip
port = var.ssh_port
}

@@ -55,7 +79,7 @@ resource "null_resource" "first_control_plane" {
var.use_control_plane_lb ? {
tls-san = concat([hcloud_load_balancer.control_plane.*.ipv4[0], hcloud_load_balancer_network.control_plane.*.ip[0]], var.additional_tls_sans)
} : {
tls-san = concat([module.control_planes[keys(module.control_planes)[0]].ipv4_address], var.additional_tls_sans)
tls-san = concat([local.first_control_plane_ip], var.additional_tls_sans)
},
local.etcd_s3_snapshots,
var.control_planes_custom_config,
@@ -149,7 +173,7 @@ resource "null_resource" "kustomization" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
host = module.control_planes[keys(module.control_planes)[0]].ipv4_address
host = local.first_control_plane_ip
port = var.ssh_port
}

12 changes: 12 additions & 0 deletions kube.tf.example
@@ -136,6 +136,10 @@ module "kube-hetzner" {

# Enable automatic backups via Hetzner (default: false)
# backups = true

# To disable public ips (default: false)
# disable_ipv4 = true
# disable_ipv6 = true
},
{
name = "control-plane-nbg1",
@@ -150,6 +154,10 @@

# Enable automatic backups via Hetzner (default: false)
# backups = true

# To disable public ips (default: false)
# disable_ipv4 = true
# disable_ipv6 = true
},
{
name = "control-plane-hel1",
@@ -164,6 +172,10 @@

# Enable automatic backups via Hetzner (default: false)
# backups = true

# To disable public ips (default: false)
# disable_ipv4 = true
# disable_ipv6 = true
}
]

4 changes: 2 additions & 2 deletions kubeconfig.tf
@@ -1,6 +1,6 @@
data "remote_file" "kubeconfig" {
conn {
host = module.control_planes[keys(module.control_planes)[0]].ipv4_address
host = local.first_control_plane_ip
port = var.ssh_port
user = "root"
private_key = var.ssh_private_key
@@ -13,7 +13,7 @@ data "remote_file" "kubeconfig" {

locals {
kubeconfig_server_address = var.kubeconfig_server_address != "" ? var.kubeconfig_server_address : (var.use_control_plane_lb ? hcloud_load_balancer.control_plane.*.ipv4[0] : (
can(module.control_planes[keys(module.control_planes)[0]].ipv4_address) ? module.control_planes[keys(module.control_planes)[0]].ipv4_address : "unknown"
can(local.first_control_plane_ip) ? local.first_control_plane_ip : "unknown"
))
kubeconfig_external = replace(replace(data.remote_file.kubeconfig.content, "127.0.0.1", local.kubeconfig_server_address), "default", var.cluster_name)
kubeconfig_parsed = yamldecode(local.kubeconfig_external)
4 changes: 2 additions & 2 deletions kustomization_user.tf
@@ -9,7 +9,7 @@ resource "null_resource" "kustomization_user" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
host = module.control_planes[keys(module.control_planes)[0]].ipv4_address
host = local.first_control_plane_ip
port = var.ssh_port
}

@@ -40,7 +40,7 @@ resource "null_resource" "kustomization_user_deploy" {
user = "root"
private_key = var.ssh_private_key
agent_identity = local.ssh_agent_identity
host = module.control_planes[keys(module.control_planes)[0]].ipv4_address
host = local.first_control_plane_ip
port = var.ssh_port
}

31 changes: 22 additions & 9 deletions locals.tf
@@ -143,7 +143,10 @@ locals {
index : node_index
selinux : nodepool_obj.selinux
placement_group_compat_idx : nodepool_obj.placement_group_compat_idx,
placement_group : nodepool_obj.placement_group
placement_group : nodepool_obj.placement_group,
disable_ipv4: nodepool_obj.disable_ipv4,
disable_ipv6: nodepool_obj.disable_ipv6,
network_id: nodepool_obj.network_id,
}
}
]...)
@@ -167,7 +170,10 @@
index : node_index
selinux : nodepool_obj.selinux
placement_group_compat_idx : nodepool_obj.placement_group_compat_idx,
placement_group : nodepool_obj.placement_group
placement_group : nodepool_obj.placement_group,
disable_ipv4: nodepool_obj.disable_ipv4,
disable_ipv6: nodepool_obj.disable_ipv6,
network_id: nodepool_obj.network_id,
}
}
]...)
Expand All @@ -193,6 +199,9 @@ locals {
placement_group_compat_idx : nodepool_obj.placement_group_compat_idx,
placement_group : nodepool_obj.placement_group,
index : floor(tonumber(node_key)),
disable_ipv4: nodepool_obj.disable_ipv4,
disable_ipv6: nodepool_obj.disable_ipv6,
network_id: nodepool_obj.network_id,
},
{ for key, value in node_obj : key => value if value != null },
{
@@ -835,8 +844,9 @@ cloudinit_write_files_common = <<EOT
set -euo pipefail

sleep 11

INTERFACE=$(ip link show | awk '/^3:/{print $2}' | sed 's/://g')

# Take the row beginning with 3 if it exists, 2 otherwise (when there is only a private ip)
INTERFACE=$(ip link show | awk 'BEGIN{l3=""}; /^3:/{l3=$2}; /^2:/{l2=$2}; END{if(l3!="") print l3; else print l2}' | sed 's/://g')
MAC=$(cat /sys/class/net/$INTERFACE/address)

cat <<EOF > /etc/udev/rules.d/70-persistent-net.rules
@@ -846,11 +856,14 @@ cloudinit_write_files_common = <<EOT
ip link set $INTERFACE down
ip link set $INTERFACE name eth1
ip link set eth1 up

eth0_connection=$(nmcli -g GENERAL.CONNECTION device show eth0)
nmcli connection modify "$eth0_connection" \
con-name eth0 \
connection.interface-name eth0

# In case of a private-only network, eth0 does not exist
if ip link show eth0 &>/dev/null; then
eth0_connection=$(nmcli -g GENERAL.CONNECTION device show eth0)
nmcli connection modify "$eth0_connection" \
con-name eth0 \
connection.interface-name eth0
fi

eth1_connection=$(nmcli -g GENERAL.CONNECTION device show eth1)
nmcli connection modify "$eth1_connection" \