
feat(helm): update chart cilium ( 1.15.7 → 1.16.0 ) #156

Merged: 1 commit merged into main from renovate/storage-cilium-1.x on Aug 3, 2024

Conversation

renovate[bot] (Contributor) commented on Jul 25, 2024

Mend Renovate

This PR contains the following updates:

Package: cilium (source)
Update: minor
Change: 1.15.7 -> 1.16.0

Release Notes

cilium/cilium (cilium)

v1.16.0

Compare Source


Configuration

📅 Schedule: Branch creation - "on friday and saturday" in timezone Europe/Prague, Automerge - At any time (no schedule defined).

🚦 Automerge: Disabled by config. Please merge this manually once you are satisfied.

♻ Rebasing: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox.

🔕 Ignore: Close this PR and you won't be reminded about these updates again.


  • If you want to rebase/retry this PR, check this box

This PR was generated by Mend Renovate. View the repository job log.


--- kubernetes/storage/apps/kube-system/cilium/app Kustomization: flux-system/cilium HelmRelease: kube-system/cilium

+++ kubernetes/storage/apps/kube-system/cilium/app Kustomization: flux-system/cilium HelmRelease: kube-system/cilium

@@ -13,13 +13,13 @@

     spec:
       chart: cilium
       sourceRef:
         kind: HelmRepository
         name: cilium
         namespace: flux-system
-      version: 1.15.7
+      version: 1.16.0
   install:
     remediation:
       retries: 3
   interval: 30m
   upgrade:
     cleanupOnFail: true
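
For reference, the full HelmRelease after this bump would look roughly like the sketch below. Only the fields visible in the hunk above are taken from the repository; the apiVersion and the chart.spec nesting are assumptions based on a typical Flux v2 layout.

apiVersion: helm.toolkit.fluxcd.io/v2  # assumed Flux API version, not shown in the diff
kind: HelmRelease
metadata:
  name: cilium
  namespace: kube-system
spec:
  interval: 30m
  chart:
    spec:
      chart: cilium
      version: 1.16.0  # bumped from 1.15.7 by this PR
      sourceRef:
        kind: HelmRepository
        name: cilium
        namespace: flux-system
  install:
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true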


--- HelmRelease: kube-system/cilium ConfigMap: kube-system/cilium-dashboard

+++ HelmRelease: kube-system/cilium ConfigMap: kube-system/cilium-dashboard

@@ -4703,27 +4703,27 @@

           ],
           "spaceLength": 10,
           "stack": false,
           "steppedLine": false,
           "targets": [
             {
-              "expr": "sum(rate(cilium_policy_l7_denied_total{k8s_app=\"cilium\", pod=~\"$pod\"}[1m]))",
+              "expr": "sum(rate(cilium_policy_l7_total{k8s_app=\"cilium\", pod=~\"$pod\", rule=\"denied\"}[1m]))",
               "format": "time_series",
               "intervalFactor": 1,
               "legendFormat": "denied",
               "refId": "A"
             },
             {
-              "expr": "sum(rate(cilium_policy_l7_forwarded_total{k8s_app=\"cilium\", pod=~\"$pod\"}[1m]))",
+              "expr": "sum(rate(cilium_policy_l7_total{k8s_app=\"cilium\", pod=~\"$pod\", rule=\"forwarded\"}[1m]))",
               "format": "time_series",
               "intervalFactor": 1,
               "legendFormat": "forwarded",
               "refId": "B"
             },
             {
-              "expr": "sum(rate(cilium_policy_l7_received_total{k8s_app=\"cilium\", pod=~\"$pod\"}[1m]))",
+              "expr": "sum(rate(cilium_policy_l7_total{k8s_app=\"cilium\", pod=~\"$pod\", rule=\"received\"}[1m]))",
               "format": "time_series",
               "intervalFactor": 1,
               "legendFormat": "received",
               "refId": "C"
             }
           ],
@@ -4869,13 +4869,13 @@

           }
         },
         {
           "aliasColors": {
             "Max per node processingTime": "#e24d42",
             "Max per node upstreamTime": "#58140c",
-            "avg(cilium_policy_l7_parse_errors_total{pod=~\"cilium.*\"})": "#bf1b00",
+            "avg(cilium_policy_l7_total{pod=~\"cilium.*\", rule=\"parse_errors\"})": "#bf1b00",
             "parse errors": "#bf1b00"
           },
           "bars": true,
           "dashLength": 10,
           "dashes": false,
           "datasource": {
@@ -4928,13 +4928,13 @@

             },
             {
               "alias": "Max per node upstreamTime",
               "yaxis": 2
             },
             {
-              "alias": "avg(cilium_policy_l7_parse_errors_total{pod=~\"cilium.*\"})",
+              "alias": "avg(cilium_policy_l7_total{pod=~\"cilium.*\", rule=\"parse_errors\"})",
               "yaxis": 2
             },
             {
               "alias": "parse errors",
               "yaxis": 2
             }
@@ -4949,13 +4949,13 @@

               "interval": "",
               "intervalFactor": 1,
               "legendFormat": "{{scope}}",
               "refId": "A"
             },
             {
-              "expr": "avg(cilium_policy_l7_parse_errors_total{k8s_app=\"cilium\", pod=~\"$pod\"}) by (pod)",
+              "expr": "avg(cilium_policy_l7_total{k8s_app=\"cilium\", pod=~\"$pod\", rule=\"parse_errors\"}) by (pod)",
               "format": "time_series",
               "intervalFactor": 1,
               "legendFormat": "parse errors",
               "refId": "B"
             }
           ],
@@ -5307,13 +5307,13 @@

               "format": "time_series",
               "intervalFactor": 1,
               "legendFormat": "Max {{scope}}",
               "refId": "B"
             },
             {
-              "expr": "max(rate(cilium_policy_l7_parse_errors_total{k8s_app=\"cilium\", pod=~\"$pod\"}[1m])) by (pod)",
+              "expr": "max(rate(cilium_policy_l7_total{k8s_app=\"cilium\", pod=~\"$pod\", rule=\"parse_errors\"}[1m])) by (pod)",
               "format": "time_series",
               "intervalFactor": 1,
               "legendFormat": "parse errors",
               "refId": "A"
             }
           ],
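
The hunks above track Cilium 1.16's consolidation of the per-outcome L7 policy metrics (cilium_policy_l7_denied_total, cilium_policy_l7_forwarded_total, cilium_policy_l7_received_total, cilium_policy_l7_parse_errors_total) into a single cilium_policy_l7_total series with a rule label. Any queries outside this dashboard need the same rewrite; below is a minimal, hypothetical PrometheusRule sketch, assuming the prometheus-operator CRDs are installed. The resource name, alert name, and threshold are illustrative and not part of this chart.

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: cilium-l7-policy          # hypothetical resource, not rendered by this chart
  namespace: kube-system
spec:
  groups:
  - name: cilium-l7-policy
    rules:
    - alert: CiliumL7ParseErrors  # illustrative alert using the consolidated metric
      expr: sum(rate(cilium_policy_l7_total{rule="parse_errors"}[5m])) > 0
      for: 10m
      labels:
        severity: warning
      annotations:
        summary: Cilium L7 proxy is reporting parse errors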
--- HelmRelease: kube-system/cilium ConfigMap: kube-system/cilium-config

+++ HelmRelease: kube-system/cilium ConfigMap: kube-system/cilium-config

@@ -7,20 +7,18 @@

 data:
   identity-allocation-mode: crd
   identity-heartbeat-timeout: 30m0s
   identity-gc-interval: 15m0s
   cilium-endpoint-gc-interval: 5m0s
   nodes-gc-interval: 5m0s
-  skip-cnp-status-startup-clean: 'false'
   debug: 'false'
   debug-verbose: ''
   enable-policy: default
   policy-cidr-match-mode: ''
   prometheus-serve-addr: :9962
   controller-group-metrics: write-cni-file sync-host-ips sync-lb-maps-with-k8s-services
-  proxy-prometheus-port: '9964'
   operator-prometheus-serve-addr: :9963
   enable-metrics: 'true'
   enable-ipv4: 'true'
   enable-ipv6: 'false'
   custom-cni-conf: 'false'
   enable-bpf-clock-probe: 'false'
@@ -28,58 +26,69 @@

   monitor-aggregation-interval: 5s
   monitor-aggregation-flags: all
   bpf-map-dynamic-size-ratio: '0.0025'
   bpf-policy-map-max: '16384'
   bpf-lb-map-max: '65536'
   bpf-lb-external-clusterip: 'false'
+  bpf-events-drop-enabled: 'true'
+  bpf-events-policy-verdict-enabled: 'true'
+  bpf-events-trace-enabled: 'true'
   preallocate-bpf-maps: 'false'
-  sidecar-istio-proxy-image: cilium/istio_proxy
   cluster-name: home-storage
   cluster-id: '1'
   routing-mode: native
   service-no-backend-response: reject
   enable-l7-proxy: 'true'
   enable-ipv4-masquerade: 'true'
   enable-ipv4-big-tcp: 'false'
   enable-ipv6-big-tcp: 'false'
   enable-ipv6-masquerade: 'true'
+  enable-tcx: 'true'
+  datapath-mode: veth
   enable-bpf-masquerade: 'false'
   enable-masquerade-to-route-source: 'false'
   enable-xt-socket-fallback: 'true'
   install-no-conntrack-iptables-rules: 'false'
   auto-direct-node-routes: 'true'
+  direct-routing-skip-unreachable: 'false'
   enable-local-redirect-policy: 'true'
   ipv4-native-routing-cidr: 10.10.0.0/16
   devices: lan
+  enable-runtime-device-detection: 'true'
   kube-proxy-replacement: 'true'
   kube-proxy-replacement-healthz-bind-address: 0.0.0.0:10256
   bpf-lb-sock: 'false'
+  bpf-lb-sock-terminate-pod-connections: 'false'
+  nodeport-addresses: ''
   enable-health-check-nodeport: 'true'
   enable-health-check-loadbalancer-ip: 'false'
   node-port-bind-protection: 'true'
   enable-auto-protect-node-port-range: 'true'
   bpf-lb-mode: hybrid
   bpf-lb-algorithm: maglev
   bpf-lb-acceleration: disabled
   enable-svc-source-range-check: 'true'
   enable-l2-neigh-discovery: 'true'
   arping-refresh-period: 30s
+  k8s-require-ipv4-pod-cidr: 'false'
+  k8s-require-ipv6-pod-cidr: 'false'
   enable-endpoint-routes: 'true'
   enable-k8s-networkpolicy: 'true'
   write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
   cni-exclusive: 'false'
   cni-log-file: /var/run/cilium/cilium-cni.log
   enable-endpoint-health-checking: 'true'
   enable-health-checking: 'true'
   enable-well-known-identities: 'false'
-  enable-remote-node-identity: 'true'
+  enable-node-selector-labels: 'false'
   synchronize-k8s-nodes: 'true'
   operator-api-serve-addr: 127.0.0.1:9234
   enable-hubble: 'true'
   hubble-socket-path: /var/run/cilium/hubble.sock
   hubble-metrics-server: :9965
+  hubble-metrics-server-enable-tls: 'false'
   hubble-metrics: dns:query drop tcp flow port-distribution icmp http
   enable-hubble-open-metrics: 'false'
   hubble-export-file-max-size-mb: '10'
   hubble-export-file-max-backups: '5'
   hubble-listen-address: :4244
   hubble-disable-tls: 'false'
@@ -110,12 +119,13 @@

   k8s-client-burst: '20'
   remove-cilium-node-taints: 'true'
   set-cilium-node-taints: 'true'
   set-cilium-is-up-condition: 'true'
   unmanaged-pod-watcher-interval: '15'
   dnsproxy-enable-transparent-mode: 'true'
+  dnsproxy-socket-linger-timeout: '10'
   tofqdns-dns-reject-response-code: refused
   tofqdns-enable-dns-compression: 'true'
   tofqdns-endpoint-max-ip-per-hostname: '50'
   tofqdns-idle-connection-grace-period: 0s
   tofqdns-max-deferred-connection-deletes: '10000'
   tofqdns-proxy-response-max-delay: 100ms
@@ -127,9 +137,13 @@

   proxy-xff-num-trusted-hops-ingress: '0'
   proxy-xff-num-trusted-hops-egress: '0'
   proxy-connect-timeout: '2'
   proxy-max-requests-per-connection: '0'
   proxy-max-connection-duration-seconds: '0'
   proxy-idle-timeout-seconds: '60'
-  external-envoy-proxy: 'false'
+  external-envoy-proxy: 'true'
+  envoy-base-id: '0'
+  envoy-keep-cap-netbindservice: 'false'
   max-connected-clusters: '255'
+  clustermesh-enable-endpoint-sync: 'false'
+  clustermesh-enable-mcs-api: 'false'
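
This ConfigMap is rendered from the chart's Helm values. As a rough orientation, a values fragment like the sketch below maps onto several of the settings visible above; the key names are taken from memory of the cilium chart and may not match this repository's actual values file.

cluster:
  name: home-storage
  id: 1
kubeProxyReplacement: true
routingMode: native
autoDirectNodeRoutes: true
ipv4NativeRoutingCIDR: 10.10.0.0/16
devices: lan
loadBalancer:
  mode: hybrid
  algorithm: maglev
localRedirectPolicy: true
hubble:
  enabled: true
  metrics:
    enabled:
    - dns:query
    - drop
    - tcp
    - flow
    - port-distribution
    - icmp
    - http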
 
--- HelmRelease: kube-system/cilium ConfigMap: kube-system/cilium-operator-dashboard

+++ HelmRelease: kube-system/cilium ConfigMap: kube-system/cilium-operator-dashboard

@@ -11,17 +11,30 @@

     grafana_dashboard: '1'
   annotations:
     grafana_folder: Cilium
 data:
   cilium-operator-dashboard.json: |
     {
+      "__inputs": [
+        {
+          "name": "DS_PROMETHEUS",
+          "label": "prometheus",
+          "description": "",
+          "type": "datasource",
+          "pluginId": "prometheus",
+          "pluginName": "Prometheus"
+        }
+      ],
       "annotations": {
         "list": [
           {
             "builtIn": 1,
-            "datasource": "-- Grafana --",
+            "datasource": {
+              "type": "datasource",
+              "uid": "grafana"
+            },
             "enable": true,
             "hide": true,
             "iconColor": "rgba(0, 211, 255, 1)",
             "name": "Annotations & Alerts",
             "type": "dashboard"
           }
@@ -37,13 +50,16 @@

           "aliasColors": {
             "avg": "#cffaff"
           },
           "bars": false,
           "dashLength": 10,
           "dashes": false,
-          "datasource": "prometheus",
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
           "fieldConfig": {
             "defaults": {
               "custom": {}
             },
             "overrides": []
           },
@@ -163,13 +179,16 @@

           "aliasColors": {
             "MAX_resident_memory_bytes_max": "#e5ac0e"
           },
           "bars": false,
           "dashLength": 10,
           "dashes": false,
-          "datasource": "prometheus",
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
           "fieldConfig": {
             "defaults": {
               "custom": {}
             },
             "overrides": []
           },
@@ -293,13 +312,16 @@

         },
         {
           "aliasColors": {},
           "bars": false,
           "dashLength": 10,
           "dashes": false,
-          "datasource": "prometheus",
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
           "fieldConfig": {
             "defaults": {
               "custom": {}
             },
             "overrides": []
           },
@@ -390,13 +412,16 @@

         },
         {
           "aliasColors": {},
           "bars": false,
           "dashLength": 10,
           "dashes": false,
-          "datasource": "prometheus",
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
           "fieldConfig": {
             "defaults": {
               "custom": {}
             },
             "overrides": []
           },
@@ -487,13 +512,16 @@

         },
         {
           "aliasColors": {},
           "bars": false,
           "dashLength": 10,
           "dashes": false,
-          "datasource": "prometheus",
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
           "fieldConfig": {
             "defaults": {
               "custom": {}
             },
             "overrides": []
           },
@@ -584,13 +612,16 @@

         },
         {
           "aliasColors": {},
           "bars": false,
           "dashLength": 10,
           "dashes": false,
-          "datasource": "prometheus",
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
           "fieldConfig": {
             "defaults": {
               "custom": {}
             },
             "overrides": []
           },
@@ -681,13 +712,16 @@

         },
         {
           "aliasColors": {},
           "bars": false,
           "dashLength": 10,
           "dashes": false,
-          "datasource": "prometheus",
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
           "fieldConfig": {
             "defaults": {
               "custom": {}
             },
             "overrides": []
           },
@@ -778,13 +812,16 @@

         },
         {
           "aliasColors": {},
           "bars": false,
           "dashLength": 10,
           "dashes": false,
-          "datasource": "prometheus",
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
           "fieldConfig": {
             "defaults": {
               "custom": {}
             },
             "overrides": []
           },
@@ -875,13 +912,16 @@

         },
         {
           "aliasColors": {},
           "bars": false,
           "dashLength": 10,
           "dashes": false,
-          "datasource": "prometheus",
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
           "fieldConfig": {
             "defaults": {
               "custom": {}
             },
             "overrides": []
           },
--- HelmRelease: kube-system/cilium ConfigMap: kube-system/hubble-relay-config

+++ HelmRelease: kube-system/cilium ConfigMap: kube-system/hubble-relay-config

@@ -6,9 +6,9 @@

   namespace: kube-system
 data:
   config.yaml: "cluster-name: home-storage\npeer-service: \"hubble-peer.kube-system.svc.cluster.local:443\"\
     \nlisten-address: :4245\ngops: true\ngops-port: \"9893\"\ndial-timeout: \nretry-timeout:\
     \ \nsort-buffer-len-max: \nsort-buffer-drain-timeout: \ntls-hubble-client-cert-file:\
     \ /var/lib/hubble-relay/tls/client.crt\ntls-hubble-client-key-file: /var/lib/hubble-relay/tls/client.key\n\
-    tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt\ndisable-server-tls:\
-    \ true\n"
+    tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt\n\n\
+    disable-server-tls: true\n"
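
For readability, the escaped config.yaml string above de-escapes to roughly the following YAML (keys with no value are rendered empty by the chart):

cluster-name: home-storage
peer-service: "hubble-peer.kube-system.svc.cluster.local:443"
listen-address: :4245
gops: true
gops-port: "9893"
dial-timeout:
retry-timeout:
sort-buffer-len-max:
sort-buffer-drain-timeout:
tls-hubble-client-cert-file: /var/lib/hubble-relay/tls/client.crt
tls-hubble-client-key-file: /var/lib/hubble-relay/tls/client.key
tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt
disable-server-tls: true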
 
--- HelmRelease: kube-system/cilium ConfigMap: kube-system/hubble-dashboard

+++ HelmRelease: kube-system/cilium ConfigMap: kube-system/hubble-dashboard

@@ -9,3256 +9,1059 @@

     app.kubernetes.io/name: hubble
     app.kubernetes.io/part-of: cilium
     grafana_dashboard: '1'
   annotations:
     grafana_folder: Cilium
 data:
-  hubble-dashboard.json: |
-    {
-      "annotations": {
-        "list": [
-          {
-            "builtIn": 1,
-            "datasource": "-- Grafana --",
-            "enable": true,
-            "hide": true,
-            "iconColor": "rgba(0, 211, 255, 1)",
-            "name": "Annotations & Alerts",
-            "type": "dashboard"
-          }
-        ]
-      },
-      "editable": true,
-      "gnetId": null,
-      "graphTooltip": 0,
-      "id": 3,
-      "links": [],
-      "panels": [
-        {
-          "collapsed": false,
-          "gridPos": {
-            "h": 1,
-            "w": 24,
-            "x": 0,
-            "y": 0
-          },
-          "id": 14,
-          "panels": [],
-          "title": "General Processing",
-          "type": "row"
-        },
-        {
-          "aliasColors": {},
-          "bars": false,
-          "dashLength": 10,
-          "dashes": false,
-          "datasource": "prometheus",
-          "fill": 1,
-          "gridPos": {
-            "h": 5,
-            "w": 12,
-            "x": 0,
-            "y": 1
-          },
-          "id": 12,
-          "legend": {
-            "avg": false,
-            "current": false,
-            "max": false,
-            "min": false,
-            "show": true,
-            "total": false,
-            "values": false
-          },
-          "lines": true,
-          "linewidth": 1,
-          "links": [],
-          "nullPointMode": "null",
-          "options": {},
-          "percentage": false,
-          "pointradius": 2,
-          "points": false,
-          "renderer": "flot",
-          "seriesOverrides": [
-            {
-              "alias": "max",
-              "fillBelowTo": "avg",
-              "lines": false
-            },
-            {
-              "alias": "avg",
-              "fill": 0,
-              "fillBelowTo": "min"
-            },
-            {
-              "alias": "min",
-              "lines": false
-            }
-          ],
-          "spaceLength": 10,
-          "stack": false,
-          "steppedLine": false,
-          "targets": [
-            {
-              "expr": "avg(sum(rate(hubble_flows_processed_total[1m])) by (pod))",
-              "format": "time_series",
-              "intervalFactor": 1,
-              "legendFormat": "avg",
-              "refId": "A"
-            },
-            {
-              "expr": "min(sum(rate(hubble_flows_processed_total[1m])) by (pod))",
-              "format": "time_series",
-              "intervalFactor": 1,
-              "legendFormat": "min",
-              "refId": "B"
-            },
-            {
-              "expr": "max(sum(rate(hubble_flows_processed_total[1m])) by (pod))",
-              "format": "time_series",
-              "intervalFactor": 1,
-              "legendFormat": "max",
-              "refId": "C"
-            }
-          ],
-          "thresholds": [],
-          "timeFrom": null,
-          "timeRegions": [],
-          "timeShift": null,
-          "title": "Flows processed Per Node",
-          "tooltip": {
-            "shared": true,
-            "sort": 1,
-            "value_type": "individual"
-          },
-          "type": "graph",
-          "xaxis": {
-            "buckets": null,
-            "mode": "time",
-            "name": null,
-            "show": true,
-            "values": []
-          },
-          "yaxes": [
-            {
-              "format": "ops",
-              "label": null,
-              "logBase": 1,
-              "max": null,
-              "min": null,
-              "show": true
-            },
-            {
-              "format": "short",
-              "label": null,
-              "logBase": 1,
-              "max": null,
-              "min": null,
-              "show": true
-            }
-          ],
-          "yaxis": {
-            "align": false,
-            "alignLevel": null
-          }
-        },
-        {
-          "aliasColors": {},
-          "bars": false,
-          "dashLength": 10,
-          "dashes": false,
-          "datasource": "prometheus",
-          "fill": 1,
-          "gridPos": {
-            "h": 5,
-            "w": 12,
-            "x": 12,
-            "y": 1
-          },
-          "id": 32,
-          "legend": {
-            "avg": false,
-            "current": false,
-            "max": false,
-            "min": false,
-            "show": true,
-            "total": false,
-            "values": false
-          },
-          "lines": true,
-          "linewidth": 1,
-          "links": [],
-          "nullPointMode": "null",
-          "options": {},
-          "percentage": false,
-          "pointradius": 2,
-          "points": false,
-          "renderer": "flot",
-          "seriesOverrides": [],
-          "spaceLength": 10,
-          "stack": true,
-          "steppedLine": false,
-          "targets": [
-            {
-              "expr": "sum(rate(hubble_flows_processed_total[1m])) by (pod, type)",
-              "format": "time_series",
-              "intervalFactor": 1,
-              "legendFormat": "{{type}}",
-              "refId": "A"
-            }
-          ],
-          "thresholds": [],
-          "timeFrom": null,
-          "timeRegions": [],
-          "timeShift": null,
-          "title": "Flows Types",
-          "tooltip": {
-            "shared": true,
-            "sort": 2,
-            "value_type": "individual"
-          },
-          "type": "graph",
-          "xaxis": {
-            "buckets": null,
-            "mode": "time",
-            "name": null,
-            "show": true,
-            "values": []
-          },
-          "yaxes": [
-            {
-              "format": "ops",
-              "label": null,
-              "logBase": 1,
-              "max": null,
-              "min": null,
-              "show": true
-            },
-            {
-              "format": "short",
-              "label": null,
-              "logBase": 1,
-              "max": null,
-              "min": null,
-              "show": true
-            }
-          ],
-          "yaxis": {
-            "align": false,
-            "alignLevel": null
-          }
-        },
-        {
-          "aliasColors": {},
-          "bars": false,
-          "dashLength": 10,
-          "dashes": false,
-          "datasource": "prometheus",
-          "fill": 1,
-          "gridPos": {
-            "h": 5,
-            "w": 12,
-            "x": 0,
-            "y": 6
-          },
-          "id": 59,
-          "legend": {
-            "avg": false,
-            "current": false,
-            "max": false,
-            "min": false,
-            "show": true,
-            "total": false,
-            "values": false
-          },
-          "lines": true,
-          "linewidth": 1,
-          "links": [],
-          "nullPointMode": "null",
-          "options": {},
-          "percentage": false,
-          "pointradius": 2,
-          "points": false,
-          "renderer": "flot",
-          "seriesOverrides": [],
-          "spaceLength": 10,
-          "stack": true,
-          "steppedLine": false,
-          "targets": [
-            {
-              "expr": "sum(rate(hubble_flows_processed_total{type=\"L7\"}[1m])) by (pod, subtype)",
-              "format": "time_series",
-              "intervalFactor": 1,
-              "legendFormat": "{{subtype}}",
-              "refId": "A"
-            }
-          ],
-          "thresholds": [],
-          "timeFrom": null,
-          "timeRegions": [],
-          "timeShift": null,
-          "title": "L7 Flow Distribution",
-          "tooltip": {
-            "shared": true,
-            "sort": 2,
-            "value_type": "individual"
-          },
-          "type": "graph",
-          "xaxis": {
-            "buckets": null,
-            "mode": "time",
-            "name": null,
-            "show": true,
-            "values": []
-          },
-          "yaxes": [
-            {
-              "format": "ops",
-              "label": null,
-              "logBase": 1,
-              "max": null,
-              "min": null,
-              "show": true
-            },
-            {
-              "format": "short",
-              "label": null,
-              "logBase": 1,
-              "max": null,
-              "min": null,
-              "show": true
-            }
-          ],
-          "yaxis": {
-            "align": false,
-            "alignLevel": null
-          }
-        },
-        {
-          "aliasColors": {},
-          "bars": false,
-          "dashLength": 10,
-          "dashes": false,
-          "datasource": "prometheus",
-          "fill": 1,
-          "gridPos": {
-            "h": 5,
-            "w": 12,
-            "x": 12,
-            "y": 6
-          },
-          "id": 60,
-          "legend": {
-            "avg": false,
-            "current": false,
-            "max": false,
-            "min": false,
-            "show": true,
-            "total": false,
-            "values": false
-          },
-          "lines": true,
-          "linewidth": 1,
-          "links": [],
-          "nullPointMode": "null",
-          "options": {},
-          "percentage": false,
-          "pointradius": 2,
-          "points": false,
-          "renderer": "flot",
-          "seriesOverrides": [],
-          "spaceLength": 10,
-          "stack": true,
-          "steppedLine": false,
-          "targets": [
-            {
-              "expr": "sum(rate(hubble_flows_processed_total{type=\"Trace\"}[1m])) by (pod, subtype)",
-              "format": "time_series",
-              "intervalFactor": 1,
-              "legendFormat": "{{subtype}}",
-              "refId": "A"
-            }
-          ],
-          "thresholds": [],
-          "timeFrom": null,
-          "timeRegions": [],
-          "timeShift": null,
-          "title": "Trace Flow Distribution",
-          "tooltip": {
[Diff truncated by flux-local]
--- HelmRelease: kube-system/cilium ConfigMap: kube-system/hubble-l7-http-metrics-by-workload

+++ HelmRelease: kube-system/cilium ConfigMap: kube-system/hubble-l7-http-metrics-by-workload

@@ -11,13 +11,22 @@

     grafana_dashboard: '1'
   annotations:
     grafana_folder: Cilium
 data:
   hubble-l7-http-metrics-by-workload.json: |
     {
-      "__inputs": [],
+      "__inputs": [
+        {
+          "name": "DS_PROMETHEUS",
+          "label": "prometheus",
+          "description": "",
+          "type": "datasource",
+          "pluginId": "prometheus",
+          "pluginName": "Prometheus"
+        }
+      ],
       "__elements": {},
       "__requires": [
         {
           "type": "grafana",
           "id": "grafana",
           "name": "Grafana",
--- HelmRelease: kube-system/cilium ClusterRole: kube-system/cilium

+++ HelmRelease: kube-system/cilium ClusterRole: kube-system/cilium

@@ -106,14 +106,12 @@

   verbs:
   - get
   - update
 - apiGroups:
   - cilium.io
   resources:
-  - ciliumnetworkpolicies/status
-  - ciliumclusterwidenetworkpolicies/status
   - ciliumendpoints/status
   - ciliumendpoints
   - ciliuml2announcementpolicies/status
   - ciliumbgpnodeconfigs/status
   verbs:
   - patch
--- HelmRelease: kube-system/cilium ClusterRole: kube-system/cilium-operator

+++ HelmRelease: kube-system/cilium ClusterRole: kube-system/cilium-operator

@@ -170,12 +170,13 @@

   - ciliumpodippools.cilium.io
 - apiGroups:
   - cilium.io
   resources:
   - ciliumloadbalancerippools
   - ciliumpodippools
+  - ciliumbgppeeringpolicies
   - ciliumbgpclusterconfigs
   - ciliumbgpnodeconfigoverrides
   verbs:
   - get
   - list
   - watch
--- HelmRelease: kube-system/cilium Service: kube-system/cilium-agent

+++ HelmRelease: kube-system/cilium Service: kube-system/cilium-agent

@@ -15,11 +15,7 @@

     k8s-app: cilium
   ports:
   - name: metrics
     port: 9962
     protocol: TCP
     targetPort: prometheus
-  - name: envoy-metrics
-    port: 9964
-    protocol: TCP
-    targetPort: envoy-metrics
 
--- HelmRelease: kube-system/cilium Service: kube-system/hubble-relay

+++ HelmRelease: kube-system/cilium Service: kube-system/hubble-relay

@@ -12,8 +12,8 @@

   type: ClusterIP
   selector:
     k8s-app: hubble-relay
   ports:
   - protocol: TCP
     port: 80
-    targetPort: 4245
+    targetPort: grpc
 
--- HelmRelease: kube-system/cilium DaemonSet: kube-system/cilium

+++ HelmRelease: kube-system/cilium DaemonSet: kube-system/cilium

@@ -16,24 +16,24 @@

     rollingUpdate:
       maxUnavailable: 2
     type: RollingUpdate
   template:
     metadata:
       annotations:
-        cilium.io/cilium-configmap-checksum: c76a56f35107ccd426da8317d4365b20078504bfbf833c6020ea2bcb268f4c9f
+        cilium.io/cilium-configmap-checksum: ed85ad04044c176d052152e3f4e142ceb385678495ff160e10e218c6a11dc0ce
       labels:
         k8s-app: cilium
         app.kubernetes.io/name: cilium-agent
         app.kubernetes.io/part-of: cilium
     spec:
       securityContext:
         appArmorProfile:
           type: Unconfined
       containers:
       - name: cilium-agent
-        image: quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0
+        image: quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058
         imagePullPolicy: IfNotPresent
         command:
         - cilium-agent
         args:
         - --config-dir=/tmp/cilium/config-map
         startupProbe:
@@ -133,16 +133,12 @@

           hostPort: 4244
           protocol: TCP
         - name: prometheus
           containerPort: 9962
           hostPort: 9962
           protocol: TCP
-        - name: envoy-metrics
-          containerPort: 9964
-          hostPort: 9964
-          protocol: TCP
         - name: hubble-metrics
           containerPort: 9965
           hostPort: 9965
           protocol: TCP
         securityContext:
           seLinuxOptions:
@@ -162,12 +158,15 @@

             - SETGID
             - SETUID
             drop:
             - ALL
         terminationMessagePolicy: FallbackToLogsOnError
         volumeMounts:
+        - name: envoy-sockets
+          mountPath: /var/run/cilium/envoy/sockets
+          readOnly: false
         - mountPath: /host/proc/sys/net
           name: host-proc-sys-net
         - mountPath: /host/proc/sys/kernel
           name: host-proc-sys-kernel
         - name: bpf-maps
           mountPath: /sys/fs/bpf
@@ -190,13 +189,13 @@

           mountPath: /var/lib/cilium/tls/hubble
           readOnly: true
         - name: tmp
           mountPath: /tmp
       initContainers:
       - name: config
-        image: quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0
+        image: quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058
         imagePullPolicy: IfNotPresent
         command:
         - cilium-dbg
         - build-config
         env:
         - name: K8S_NODE_NAME
@@ -215,13 +214,13 @@

           value: '6444'
         volumeMounts:
         - name: tmp
           mountPath: /tmp
         terminationMessagePolicy: FallbackToLogsOnError
       - name: mount-cgroup
-        image: quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0
+        image: quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058
         imagePullPolicy: IfNotPresent
         env:
         - name: CGROUP_ROOT
           value: /sys/fs/cgroup
         - name: BIN_PATH
           value: /var/lib/rancher/k3s/data/current/bin
@@ -247,13 +246,13 @@

             - SYS_ADMIN
             - SYS_CHROOT
             - SYS_PTRACE
             drop:
             - ALL
       - name: apply-sysctl-overwrites
-        image: quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0
+        image: quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058
         imagePullPolicy: IfNotPresent
         env:
         - name: BIN_PATH
           value: /var/lib/rancher/k3s/data/current/bin
         command:
         - sh
@@ -277,13 +276,13 @@

             - SYS_ADMIN
             - SYS_CHROOT
             - SYS_PTRACE
             drop:
             - ALL
       - name: mount-bpf-fs
-        image: quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0
+        image: quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058
         imagePullPolicy: IfNotPresent
         args:
         - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
         command:
         - /bin/bash
         - -c
@@ -293,13 +292,13 @@

           privileged: true
         volumeMounts:
         - name: bpf-maps
           mountPath: /sys/fs/bpf
           mountPropagation: Bidirectional
       - name: clean-cilium-state
-        image: quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0
+        image: quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058
         imagePullPolicy: IfNotPresent
         command:
         - /init-container.sh
         env:
         - name: CILIUM_ALL_STATE
           valueFrom:
@@ -341,13 +340,13 @@

         - name: cilium-cgroup
           mountPath: /sys/fs/cgroup
           mountPropagation: HostToContainer
         - name: cilium-run
           mountPath: /var/run/cilium
       - name: install-cni-binaries
-        image: quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0
+        image: quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058
         imagePullPolicy: IfNotPresent
         command:
         - /install-plugin.sh
         resources:
           requests:
             cpu: 100m
@@ -362,13 +361,12 @@

         terminationMessagePolicy: FallbackToLogsOnError
         volumeMounts:
         - name: cni-path
           mountPath: /host/opt/cni/bin
       restartPolicy: Always
       priorityClassName: system-node-critical
-      serviceAccount: cilium
       serviceAccountName: cilium
       automountServiceAccountToken: true
       terminationGracePeriodSeconds: 1
       hostNetwork: true
       affinity:
         podAntiAffinity:
@@ -412,12 +410,16 @@

         hostPath:
           path: /lib/modules
       - name: xtables-lock
         hostPath:
           path: /run/xtables.lock
           type: FileOrCreate
+      - name: envoy-sockets
+        hostPath:
+          path: /var/run/cilium/envoy/sockets
+          type: DirectoryOrCreate
       - name: clustermesh-secrets
         projected:
           defaultMode: 256
           sources:
           - secret:
               name: cilium-clustermesh
@@ -429,12 +431,22 @@

               - key: tls.key
                 path: common-etcd-client.key
               - key: tls.crt
                 path: common-etcd-client.crt
               - key: ca.crt
                 path: common-etcd-client-ca.crt
+          - secret:
+              name: clustermesh-apiserver-local-cert
+              optional: true
+              items:
+              - key: tls.key
+                path: local-etcd-client.key
+              - key: tls.crt
+                path: local-etcd-client.crt
+              - key: ca.crt
+                path: local-etcd-client-ca.crt
       - name: host-proc-sys-net
         hostPath:
           path: /proc/sys/net
           type: Directory
       - name: host-proc-sys-kernel
         hostPath:
--- HelmRelease: kube-system/cilium Deployment: kube-system/cilium-operator

+++ HelmRelease: kube-system/cilium Deployment: kube-system/cilium-operator

@@ -20,22 +20,22 @@

       maxSurge: 25%
       maxUnavailable: 100%
     type: RollingUpdate
   template:
     metadata:
       annotations:
-        cilium.io/cilium-configmap-checksum: c76a56f35107ccd426da8317d4365b20078504bfbf833c6020ea2bcb268f4c9f
+        cilium.io/cilium-configmap-checksum: ed85ad04044c176d052152e3f4e142ceb385678495ff160e10e218c6a11dc0ce
       labels:
         io.cilium/app: operator
         name: cilium-operator
         app.kubernetes.io/part-of: cilium
         app.kubernetes.io/name: cilium-operator
     spec:
       containers:
       - name: cilium-operator
-        image: quay.io/cilium/operator-generic:v1.15.7@sha256:6840a6dde703b3e73dd31e03390327a9184fcb888efbad9d9d098d65b9035b54
+        image: quay.io/cilium/operator-generic:v1.16.0@sha256:d6621c11c4e4943bf2998af7febe05be5ed6fdcf812b27ad4388f47022190316
         imagePullPolicy: IfNotPresent
         command:
         - cilium-operator-generic
         args:
         - --config-dir=/tmp/cilium/config-map
         - --debug=$(CILIUM_DEBUG)
@@ -89,13 +89,12 @@

           mountPath: /tmp/cilium/config-map
           readOnly: true
         terminationMessagePolicy: FallbackToLogsOnError
       hostNetwork: true
       restartPolicy: Always
       priorityClassName: system-cluster-critical
-      serviceAccount: cilium-operator
       serviceAccountName: cilium-operator
       automountServiceAccountToken: true
       affinity:
         podAntiAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
--- HelmRelease: kube-system/cilium Deployment: kube-system/hubble-relay

+++ HelmRelease: kube-system/cilium Deployment: kube-system/hubble-relay

@@ -17,13 +17,13 @@

     rollingUpdate:
       maxUnavailable: 1
     type: RollingUpdate
   template:
     metadata:
       annotations:
-        cilium.io/hubble-relay-configmap-checksum: 1a18a8361268a1b77025a977a3b960b917b26fb6370a057575c19c44f7747922
+        cilium.io/hubble-relay-configmap-checksum: 45099c3a19b083819b53bf3cd6bedb26149708309b64012b744daff966c66729
       labels:
         k8s-app: hubble-relay
         app.kubernetes.io/name: hubble-relay
         app.kubernetes.io/part-of: cilium
     spec:
       securityContext:
@@ -34,13 +34,13 @@

           capabilities:
             drop:
             - ALL
           runAsGroup: 65532
           runAsNonRoot: true
           runAsUser: 65532
-        image: quay.io/cilium/hubble-relay:v1.15.7@sha256:12870e87ec6c105ca86885c4ee7c184ece6b706cc0f22f63d2a62a9a818fd68f
+        image: quay.io/cilium/hubble-relay:v1.16.0@sha256:33fca7776fc3d7b2abe08873319353806dc1c5e07e12011d7da4da05f836ce8d
         imagePullPolicy: IfNotPresent
         command:
         - hubble-relay
         args:
         - serve
         ports:
@@ -50,30 +50,32 @@

           grpc:
             port: 4222
           timeoutSeconds: 3
         livenessProbe:
           grpc:
             port: 4222
-          timeoutSeconds: 3
+          timeoutSeconds: 10
+          initialDelaySeconds: 10
+          periodSeconds: 10
+          failureThreshold: 12
         startupProbe:
           grpc:
             port: 4222
-          timeoutSeconds: 3
+          initialDelaySeconds: 10
           failureThreshold: 20
           periodSeconds: 3
         volumeMounts:
         - name: config
           mountPath: /etc/hubble-relay
           readOnly: true
         - name: tls
           mountPath: /var/lib/hubble-relay/tls
           readOnly: true
         terminationMessagePolicy: FallbackToLogsOnError
       restartPolicy: Always
       priorityClassName: null
-      serviceAccount: hubble-relay
       serviceAccountName: hubble-relay
       automountServiceAccountToken: false
       terminationGracePeriodSeconds: 1
       affinity:
         podAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
--- HelmRelease: kube-system/cilium Deployment: kube-system/hubble-ui

+++ HelmRelease: kube-system/cilium Deployment: kube-system/hubble-ui

@@ -28,13 +28,12 @@

     spec:
       securityContext:
         fsGroup: 1001
         runAsGroup: 1001
         runAsUser: 1001
       priorityClassName: null
-      serviceAccount: hubble-ui
       serviceAccountName: hubble-ui
       automountServiceAccountToken: true
       containers:
       - name: frontend
         image: quay.io/cilium/hubble-ui:v0.13.1@sha256:e2e9313eb7caf64b0061d9da0efbdad59c6c461f6ca1752768942bfeda0796c6
         imagePullPolicy: IfNotPresent
--- HelmRelease: kube-system/cilium ServiceMonitor: kube-system/hubble

+++ HelmRelease: kube-system/cilium ServiceMonitor: kube-system/hubble

@@ -15,12 +15,13 @@

     - kube-system
   endpoints:
   - port: hubble-metrics
     interval: 10s
     honorLabels: true
     path: /metrics
+    scheme: http
     relabelings:
     - replacement: ${1}
       sourceLabels:
       - __meta_kubernetes_pod_node_name
       targetLabel: node
 
--- HelmRelease: kube-system/cilium ServiceAccount: kube-system/cilium-envoy

+++ HelmRelease: kube-system/cilium ServiceAccount: kube-system/cilium-envoy

@@ -0,0 +1,7 @@

+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: cilium-envoy
+  namespace: kube-system
+
--- HelmRelease: kube-system/cilium ConfigMap: kube-system/cilium-envoy-config

+++ HelmRelease: kube-system/cilium ConfigMap: kube-system/cilium-envoy-config

@@ -0,0 +1,326 @@

+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: cilium-envoy-config
+  namespace: kube-system
+data:
+  bootstrap-config.json: |
+    {
+      "node": {
+        "id": "host~127.0.0.1~no-id~localdomain",
+        "cluster": "ingress-cluster"
+      },
+      "staticResources": {
+        "listeners": [
+          {
+            "name": "envoy-prometheus-metrics-listener",
+            "address": {
+              "socket_address": {
+                "address": "0.0.0.0",
+                "port_value": 9964
+              }
+            },
+            "filter_chains": [
+              {
+                "filters": [
+                  {
+                    "name": "envoy.filters.network.http_connection_manager",
+                    "typed_config": {
+                      "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
+                      "stat_prefix": "envoy-prometheus-metrics-listener",
+                      "route_config": {
+                        "virtual_hosts": [
+                          {
+                            "name": "prometheus_metrics_route",
+                            "domains": [
+                              "*"
+                            ],
+                            "routes": [
+                              {
+                                "name": "prometheus_metrics_route",
+                                "match": {
+                                  "prefix": "/metrics"
+                                },
+                                "route": {
+                                  "cluster": "/envoy-admin",
+                                  "prefix_rewrite": "/stats/prometheus"
+                                }
+                              }
+                            ]
+                          }
+                        ]
+                      },
+                      "http_filters": [
+                        {
+                          "name": "envoy.filters.http.router",
+                          "typed_config": {
+                            "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
+                          }
+                        }
+                      ],
+                      "stream_idle_timeout": "0s"
+                    }
+                  }
+                ]
+              }
+            ]
+          },
+          {
+            "name": "envoy-health-listener",
+            "address": {
+              "socket_address": {
+                "address": "127.0.0.1",
+                "port_value": 9878
+              }
+            },
+            "filter_chains": [
+              {
+                "filters": [
+                  {
+                    "name": "envoy.filters.network.http_connection_manager",
+                    "typed_config": {
+                      "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
+                      "stat_prefix": "envoy-health-listener",
+                      "route_config": {
+                        "virtual_hosts": [
+                          {
+                            "name": "health",
+                            "domains": [
+                              "*"
+                            ],
+                            "routes": [
+                              {
+                                "name": "health",
+                                "match": {
+                                  "prefix": "/healthz"
+                                },
+                                "route": {
+                                  "cluster": "/envoy-admin",
+                                  "prefix_rewrite": "/ready"
+                                }
+                              }
+                            ]
+                          }
+                        ]
+                      },
+                      "http_filters": [
+                        {
+                          "name": "envoy.filters.http.router",
+                          "typed_config": {
+                            "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
+                          }
+                        }
+                      ],
+                      "stream_idle_timeout": "0s"
+                    }
+                  }
+                ]
+              }
+            ]
+          }
+        ],
+        "clusters": [
+          {
+            "name": "ingress-cluster",
+            "type": "ORIGINAL_DST",
+            "connectTimeout": "2s",
+            "lbPolicy": "CLUSTER_PROVIDED",
+            "typedExtensionProtocolOptions": {
+              "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
+                "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
+                "commonHttpProtocolOptions": {
+                  "idleTimeout": "60s",
+                  "maxConnectionDuration": "0s",
+                  "maxRequestsPerConnection": 0
+                },
+                "useDownstreamProtocolConfig": {}
+              }
+            },
+            "cleanupInterval": "2.500s"
+          },
+          {
+            "name": "egress-cluster-tls",
+            "type": "ORIGINAL_DST",
+            "connectTimeout": "2s",
+            "lbPolicy": "CLUSTER_PROVIDED",
+            "typedExtensionProtocolOptions": {
+              "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
+                "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
+                "commonHttpProtocolOptions": {
+                  "idleTimeout": "60s",
+                  "maxConnectionDuration": "0s",
+                  "maxRequestsPerConnection": 0
+                },
+                "upstreamHttpProtocolOptions": {},
+                "useDownstreamProtocolConfig": {}
+              }
+            },
+            "cleanupInterval": "2.500s",
+            "transportSocket": {
+              "name": "cilium.tls_wrapper",
+              "typedConfig": {
+                "@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext"
+              }
+            }
+          },
+          {
+            "name": "egress-cluster",
+            "type": "ORIGINAL_DST",
+            "connectTimeout": "2s",
+            "lbPolicy": "CLUSTER_PROVIDED",
+            "typedExtensionProtocolOptions": {
+              "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
+                "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
+                "commonHttpProtocolOptions": {
+                  "idleTimeout": "60s",
+                  "maxConnectionDuration": "0s",
+                  "maxRequestsPerConnection": 0
+                },
+                "useDownstreamProtocolConfig": {}
+              }
+            },
+            "cleanupInterval": "2.500s"
+          },
+          {
+            "name": "ingress-cluster-tls",
+            "type": "ORIGINAL_DST",
+            "connectTimeout": "2s",
+            "lbPolicy": "CLUSTER_PROVIDED",
+            "typedExtensionProtocolOptions": {
+              "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
+                "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
+                "commonHttpProtocolOptions": {
+                  "idleTimeout": "60s",
+                  "maxConnectionDuration": "0s",
+                  "maxRequestsPerConnection": 0
+                },
+                "upstreamHttpProtocolOptions": {},
+                "useDownstreamProtocolConfig": {}
+              }
+            },
+            "cleanupInterval": "2.500s",
+            "transportSocket": {
+              "name": "cilium.tls_wrapper",
+              "typedConfig": {
+                "@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext"
+              }
+            }
+          },
+          {
+            "name": "xds-grpc-cilium",
+            "type": "STATIC",
+            "connectTimeout": "2s",
+            "loadAssignment": {
+              "clusterName": "xds-grpc-cilium",
+              "endpoints": [
+                {
+                  "lbEndpoints": [
+                    {
+                      "endpoint": {
+                        "address": {
+                          "pipe": {
+                            "path": "/var/run/cilium/envoy/sockets/xds.sock"
+                          }
+                        }
+                      }
+                    }
+                  ]
+                }
+              ]
+            },
+            "typedExtensionProtocolOptions": {
+              "envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
+                "@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
+                "explicitHttpConfig": {
+                  "http2ProtocolOptions": {}
+                }
+              }
+            }
+          },
+          {
+            "name": "/envoy-admin",
+            "type": "STATIC",
+            "connectTimeout": "2s",
+            "loadAssignment": {
+              "clusterName": "/envoy-admin",
+              "endpoints": [
+                {
+                  "lbEndpoints": [
+                    {
+                      "endpoint": {
+                        "address": {
+                          "pipe": {
+                            "path": "/var/run/cilium/envoy/sockets/admin.sock"
+                          }
+                        }
+                      }
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      },
+      "dynamicResources": {
+        "ldsConfig": {
+          "apiConfigSource": {
+            "apiType": "GRPC",
+            "transportApiVersion": "V3",
+            "grpcServices": [
+              {
+                "envoyGrpc": {
[Diff truncated by flux-local]
--- HelmRelease: kube-system/cilium DaemonSet: kube-system/cilium-envoy

+++ HelmRelease: kube-system/cilium DaemonSet: kube-system/cilium-envoy

@@ -0,0 +1,171 @@

+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: cilium-envoy
+  namespace: kube-system
+  labels:
+    k8s-app: cilium-envoy
+    app.kubernetes.io/part-of: cilium
+    app.kubernetes.io/name: cilium-envoy
+    name: cilium-envoy
+spec:
+  selector:
+    matchLabels:
+      k8s-app: cilium-envoy
+  updateStrategy:
+    rollingUpdate:
+      maxUnavailable: 2
+    type: RollingUpdate
+  template:
+    metadata:
+      annotations:
+        prometheus.io/port: '9964'
+        prometheus.io/scrape: 'true'
+      labels:
+        k8s-app: cilium-envoy
+        name: cilium-envoy
+        app.kubernetes.io/name: cilium-envoy
+        app.kubernetes.io/part-of: cilium
+    spec:
+      securityContext:
+        appArmorProfile:
+          type: Unconfined
+      containers:
+      - name: cilium-envoy
+        image: quay.io/cilium/cilium-envoy:v1.29.7-39a2a56bbd5b3a591f69dbca51d3e30ef97e0e51@sha256:bd5ff8c66716080028f414ec1cb4f7dc66f40d2fb5a009fff187f4a9b90b566b
+        imagePullPolicy: IfNotPresent
+        command:
+        - /usr/bin/cilium-envoy-starter
+        args:
+        - --
+        - -c /var/run/cilium/envoy/bootstrap-config.json
+        - --base-id 0
+        - --log-level info
+        - --log-format [%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v
+        startupProbe:
+          httpGet:
+            host: 127.0.0.1
+            path: /healthz
+            port: 9878
+            scheme: HTTP
+          failureThreshold: 105
+          periodSeconds: 2
+          successThreshold: 1
+          initialDelaySeconds: 5
+        livenessProbe:
+          httpGet:
+            host: 127.0.0.1
+            path: /healthz
+            port: 9878
+            scheme: HTTP
+          periodSeconds: 30
+          successThreshold: 1
+          failureThreshold: 10
+          timeoutSeconds: 5
+        readinessProbe:
+          httpGet:
+            host: 127.0.0.1
+            path: /healthz
+            port: 9878
+            scheme: HTTP
+          periodSeconds: 30
+          successThreshold: 1
+          failureThreshold: 3
+          timeoutSeconds: 5
+        env:
+        - name: K8S_NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: CILIUM_K8S_NAMESPACE
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: metadata.namespace
+        - name: KUBERNETES_SERVICE_HOST
+          value: 127.0.0.1
+        - name: KUBERNETES_SERVICE_PORT
+          value: '6444'
+        ports:
+        - name: envoy-metrics
+          containerPort: 9964
+          hostPort: 9964
+          protocol: TCP
+        securityContext:
+          seLinuxOptions:
+            level: s0
+            type: spc_t
+          capabilities:
+            add:
+            - NET_ADMIN
+            - SYS_ADMIN
+            drop:
+            - ALL
+        terminationMessagePolicy: FallbackToLogsOnError
+        volumeMounts:
+        - name: envoy-sockets
+          mountPath: /var/run/cilium/envoy/sockets
+          readOnly: false
+        - name: envoy-artifacts
+          mountPath: /var/run/cilium/envoy/artifacts
+          readOnly: true
+        - name: envoy-config
+          mountPath: /var/run/cilium/envoy/
+          readOnly: true
+        - name: bpf-maps
+          mountPath: /sys/fs/bpf
+          mountPropagation: HostToContainer
+      restartPolicy: Always
+      priorityClassName: system-node-critical
+      serviceAccountName: cilium-envoy
+      automountServiceAccountToken: true
+      terminationGracePeriodSeconds: 1
+      hostNetwork: true
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: cilium.io/no-schedule
+                operator: NotIn
+                values:
+                - 'true'
+        podAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchLabels:
+                k8s-app: cilium
+            topologyKey: kubernetes.io/hostname
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchLabels:
+                k8s-app: cilium-envoy
+            topologyKey: kubernetes.io/hostname
+      nodeSelector:
+        kubernetes.io/os: linux
+      tolerations:
+      - operator: Exists
+      volumes:
+      - name: envoy-sockets
+        hostPath:
+          path: /var/run/cilium/envoy/sockets
+          type: DirectoryOrCreate
+      - name: envoy-artifacts
+        hostPath:
+          path: /var/run/cilium/envoy/artifacts
+          type: DirectoryOrCreate
+      - name: envoy-config
+        configMap:
+          name: cilium-envoy-config
+          defaultMode: 256
+          items:
+          - key: bootstrap-config.json
+            path: bootstrap-config.json
+      - name: bpf-maps
+        hostPath:
+          path: /sys/fs/bpf
+          type: DirectoryOrCreate
+
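
The new cilium-envoy ServiceAccount, ConfigMap, and DaemonSet above, together with external-envoy-proxy: 'true' in cilium-config and the envoy-metrics port moving off the agent, reflect Cilium 1.16 running Envoy as a dedicated DaemonSet, which the 1.16 chart appears to enable by default. A hedged values fragment that controls this, with key names from memory of the chart rather than from this repository:

envoy:
  enabled: true      # run the L7 proxy as its own cilium-envoy DaemonSet instead of inside cilium-agent
  prometheus:
    enabled: true    # exposes the envoy-metrics port (9964) via the pod annotations shown above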

prehor merged commit 1a4d8dd into main on Aug 3, 2024 (12 checks passed).
The renovate bot deleted the renovate/storage-cilium-1.x branch on August 3, 2024 at 16:47.