deployer: update daemonset_requests.yaml
consideRatio committed May 15, 2024
1 parent 4b56a5a commit 020f908
Showing 1 changed file with 61 additions and 35 deletions.
@@ -14,17 +14,30 @@
# - We may deploy or change a DaemonSet's requests over time (support-cryptnono,
# support-prometheus-node-exporter)
#
# Besides requests from DaemonSet resources, GKE apparently has a controller
# creating a standalone kube-proxy pod per node, currently requesting 100m CPU
# (no memory requests). These requests need to be accounted for as well in the
# end, but aren't tracked in this file.
#
# This file isn't updated by automation, but can easily be updated by manually
# running a command once for each cluster:
#
# deployer config get-clusters | xargs -I {} deployer generate resource-allocation daemonset-requests {}
#
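# For a single cluster, the xargs pipeline above reduces to one invocation,
# e.g. for the 2i2c cluster listed below:
#
# deployer generate resource-allocation daemonset-requests 2i2c
#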
gke:
# Current overhead is 454m and 656Mi (May 15 2024 with GKE 1.29.1).
#
# About kube-proxy:
#
# GKE's node controller creates a standalone kube-proxy pod per node that
# isn't observed by inspecting the daemonsets' requests, currently requesting
# 100m CPU (no memory requests). These requests need to be accounted for as
# well in the end, but aren't tracked in this file.
#
# About daemonsets varying between GKE clusters:
#
# - gke-metrics-agent requests 3+3m CPU and 60+40Mi memory; it's not clear
#   why some clusters have it and others don't.
# - calico-node requests 100m CPU and 0Mi memory; it's present when network
#   policy enforcement is enabled.
# - ip-masq-agent requests 10m CPU and 16Mi memory; it's present when network
#   policy enforcement is enabled.
#
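# As a worked example of how these numbers combine: a cluster reporting 354m
# CPU and 656Mi memory from its daemonsets (like 2i2c below) has an effective
# per-node overhead of 354m + 100m (kube-proxy) = 454m CPU and 656Mi memory,
# matching the current overhead quoted above.
#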
2i2c:
requesting_daemon_sets: calico-node,fluentbit-gke,gke-metadata-server,gke-metrics-agent,ip-masq-agent,netd,pdcsi-node,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: binder-staging-dind,binder-staging-image-cleaner,imagebuilding-demo-binderhub-service-docker-api
@@ -43,13 +56,8 @@ gke:
cpu_requests: 354m
memory_requests: 656Mi
k8s_version: v1.29.1-gke.1589018
callysto:
requesting_daemon_sets: calico-node,fluentbit-gke,gke-metadata-server,gke-metrics-agent,ip-masq-agent,netd,pdcsi-node,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 344m
memory_requests: 596Mi
k8s_version: v1.27.7-gke.1056000
catalystproject-latam:
# missing: gke-metrics-agent
requesting_daemon_sets: calico-node,fluentbit-gke,gke-metadata-server,ip-masq-agent,netd,pdcsi-node,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 348m
@@ -62,11 +70,12 @@ gke:
memory_requests: 656Mi
k8s_version: v1.29.1-gke.1589018
hhmi:
# missing: calico-node, ip-masq-agent, gke-metrics-agent
requesting_daemon_sets: fluentbit-gke,gke-metadata-server,netd,pdcsi-node,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 228m
memory_requests: 480Mi
k8s_version: v1.27.10-gke.1055000
cpu_requests: 238m
memory_requests: 540Mi
k8s_version: v1.29.1-gke.1589020
leap:
requesting_daemon_sets: calico-node,fluentbit-gke,gke-metadata-server,gke-metrics-agent,ip-masq-agent,netd,pdcsi-node,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: maintenance-handler
@@ -80,6 +89,7 @@ gke:
memory_requests: 656Mi
k8s_version: v1.29.1-gke.1589018
meom-ige:
# missing: calico-node, ip-masq-agent
requesting_daemon_sets: fluentbit-gke,gke-metadata-server,gke-metrics-agent,netd,pdcsi-node,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 244m
@@ -92,120 +102,136 @@ gke:
memory_requests: 656Mi
k8s_version: v1.29.1-gke.1589018
qcl:
# missing: gke-metrics-agent
requesting_daemon_sets: calico-node,fluentbit-gke,gke-metadata-server,ip-masq-agent,netd,pdcsi-node,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 348m
memory_requests: 556Mi
k8s_version: v1.29.1-gke.1589018
eks:
# Current overhead is 195m and 250Mi (May 15 2024 with EKS 1.29.4).
#
# About daemonsets varying between EKS clusters:
#
# - aws-node requests 25m or 50m CPU depending on whether it runs the extra
#   aws-network-policy-agent container.
#
# ref: https://github.com/2i2c-org/infrastructure/issues/1794
#
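# As a worked example: clusters whose aws-node runs that extra container
# report 195m total CPU requests, while the rest report 170m; the 25m
# difference is exactly the additional aws-node request.
#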
2i2c-aws-us:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 170m
memory_requests: 250Mi
k8s_version: v1.27.10-eks-508b6b3
k8s_version: v1.29.4-eks-036c24b
bican:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 195m
memory_requests: 250Mi
k8s_version: v1.29.1-eks-b9c9ed7
k8s_version: v1.29.3-eks-adc7111
catalystproject-africa:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 170m
memory_requests: 250Mi
k8s_version: v1.27.10-eks-508b6b3
k8s_version: v1.29.3-eks-adc7111
dandi:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 195m
memory_requests: 250Mi
k8s_version: v1.29.1-eks-b9c9ed7
k8s_version: v1.29.3-eks-adc7111
earthscope:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 195m
cpu_requests: 170m
memory_requests: 250Mi
k8s_version: v1.28.6-eks-508b6b3
k8s_version: v1.29.4-eks-036c24b
gridsst:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 170m
memory_requests: 250Mi
k8s_version: v1.27.10-eks-508b6b3
k8s_version: v1.29.4-eks-036c24b
jupyter-health:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 195m
memory_requests: 250Mi
k8s_version: v1.29.1-eks-b9c9ed7
k8s_version: v1.29.3-eks-adc7111
jupyter-meets-the-earth:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 170m
memory_requests: 250Mi
k8s_version: v1.27.10-eks-508b6b3
k8s_version: v1.29.4-eks-036c24b
kitware:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 195m
memory_requests: 250Mi
k8s_version: v1.29.3-eks-adc7111
linc:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 195m
memory_requests: 250Mi
k8s_version: v1.29.1-eks-b9c9ed7
k8s_version: v1.29.3-eks-adc7111
nasa-cryo:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 170m
memory_requests: 250Mi
k8s_version: v1.27.10-eks-508b6b3
k8s_version: v1.29.4-eks-036c24b
nasa-esdis:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 170m
memory_requests: 250Mi
k8s_version: v1.27.10-eks-508b6b3
k8s_version: v1.29.4-eks-036c24b
nasa-ghg:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 170m
memory_requests: 250Mi
k8s_version: v1.27.10-eks-508b6b3
k8s_version: v1.29.4-eks-036c24b
nasa-veda:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 170m
memory_requests: 250Mi
k8s_version: v1.27.10-eks-508b6b3
k8s_version: v1.29.4-eks-036c24b
openscapes:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 170m
memory_requests: 250Mi
k8s_version: v1.27.10-eks-508b6b3
k8s_version: v1.29.4-eks-036c24b
opensci:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 195m
other_daemon_sets: sciencecore-binderhub-service-docker-api
cpu_requests: 170m
memory_requests: 250Mi
k8s_version: v1.28.6-eks-508b6b3
k8s_version: v1.29.4-eks-036c24b
smithsonian:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 170m
memory_requests: 250Mi
k8s_version: v1.27.10-eks-508b6b3
k8s_version: v1.29.4-eks-036c24b
ubc-eoas:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 170m
memory_requests: 250Mi
k8s_version: v1.27.10-eks-508b6b3
k8s_version: v1.29.4-eks-036c24b
victor:
requesting_daemon_sets: aws-node,ebs-csi-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter
other_daemon_sets: ""
cpu_requests: 170m
memory_requests: 250Mi
k8s_version: v1.27.10-eks-508b6b3
k8s_version: v1.29.4-eks-036c24b
aks:
utoronto:
requesting_daemon_sets: cloud-node-manager,csi-azuredisk-node,csi-azurefile-node,kube-proxy,support-cryptnono,support-prometheus-node-exporter