diff --git a/Dockerfile.docs b/Dockerfile.docs
new file mode 100644
index 0000000000..afb16870bb
--- /dev/null
+++ b/Dockerfile.docs
@@ -0,0 +1,3 @@
+FROM squidfunk/mkdocs-material
+RUN pip install --no-cache-dir mkdocs-markdownextradata-plugin
+RUN apk add --no-cache git openssh
diff --git a/Makefile b/Makefile
index a0649db7a3..4f3a0ec97d 100644
--- a/Makefile
+++ b/Makefile
@@ -7,6 +7,15 @@ TARGETS := $(shell ls scripts)
@./.dapper.tmp -v
@mv .dapper.tmp .dapper
+serve-docs: mkdocs
+ docker run --net=host --rm -it -v $${PWD}:/docs mkdocs serve
+
+deploy-docs: mkdocs
+ docker run -v $${HOME}/.ssh:/root/.ssh --rm -it -v $${PWD}:/docs mkdocs gh-deploy -r rancher
+
+mkdocs:
+ docker build -t mkdocs -f Dockerfile.docs .
+
$(TARGETS): .dapper
./.dapper $@
diff --git a/README.md b/README.md
index 7ff9df4c05..61bed3be16 100644
--- a/README.md
+++ b/README.md
@@ -1,89 +1,77 @@
-Fleet
-=============
+# Introduction
-### Status: early-ALPHA (actively looking for feedback)
+### Fleet is currently alpha quality and actively being developed.
-![](docs/arch.png)
+![](./docs/arch.png)
+Fleet is GitOps at scale. Fleet is designed to manage up to a million clusters. It's also lightweight
+enough that it works great for a [single cluster](./single-cluster-install.md) too, but it really shines
+when you get to a large scale. By large scale we mean either a lot of clusters, a lot of deployments, or a lot of
+teams in a single organization.
+
+Fleet can manage deployments from git of raw Kubernetes YAML, Helm charts, or Kustomize or any combination of the three.
+Regardless of the source all resources are dynamically turned into Helm charts and Helm is used as the engine to
+deploy everything in the cluster. This gives a high degree of control, consistency, and auditability. Fleet focuses not only on
+the ability to scale, but to give one a high degree of control and visibility to exactly what is installed on the cluster.
+
+# Quick Start
+Who needs documentation, let's just run this thing!
+
+## Install
+
+Get helm if you don't have it. Helm 3 is just a CLI and won't do bad insecure
+things to your cluster.
+
+```
+brew install helm
+```
+
+Install the Fleet Helm charts (there are two because we separate out CRDs for ultimate flexibility).
+
+```shell
+helm -n fleet-system install --create-namespace --wait \
+ fleet-crd https://github.com/rancher/fleet/releases/download/{{fleet.version}}/fleet-crd-{{fleet.helmversion}}.tgz
+helm -n fleet-system install --create-namespace --wait \
+ fleet https://github.com/rancher/fleet/releases/download/{{fleet.version}}/fleet-{{fleet.helmversion}}.tgz
+```
+
+## Add a Git Repo to watch
+
+Change `spec.repo` to your git repo of choice. Kubernetes manifest files that should
+be deployed should be in `/manifests` in your repo.
+
+```bash
+cat > example.yaml << "EOF"
+apiVersion: fleet.cattle.io/v1alpha1
+kind: GitRepo
+metadata:
+ name: sample
+ # This namespace is special and auto-wired to deploy to the local cluster
+ namespace: fleet-local
+spec:
+ # Everything from this repo will be run in this cluster. You trust me, right?
+ repo: "https://github.com/fleet-demo/simple"
+EOF
+
+kubectl apply -f example.yaml
```
-$ kubectl get fleet
-NAME CLUSTERS-READY CLUSTERS-DESIRED STATUS
-bundle.fleet.cattle.io/helm-download 0 3 NotApplied: 3 (default-bobby-group/cluster-93d18642-217a-486b-9a5d-be06762443b2... )
-bundle.fleet.cattle.io/fleet-agent 3 3
-bundle.fleet.cattle.io/helm-kustomize 0 3 NotApplied: 3 (default-bobby-group/cluster-93d18642-217a-486b-9a5d-be06762443b2... )
-bundle.fleet.cattle.io/helm 0 3 NotApplied: 3 (default-bobby-group/cluster-93d18642-217a-486b-9a5d-be06762443b2... )
-bundle.fleet.cattle.io/kustomize 0 3 NotApplied: 3 (default-bobby-group/cluster-93d18642-217a-486b-9a5d-be06762443b2... )
-bundle.fleet.cattle.io/yaml 0 3 NotApplied: 3 (default-bobby-group/cluster-93d18642-217a-486b-9a5d-be06762443b2... )
+## Get Status
+
+Get status of what fleet is doing
-NAME CLUSTER-COUNT NONREADY-CLUSTERS BUNDLES-READY BUNDLES-DESIRED STATUS
-clustergroup.fleet.cattle.io/othergroup 1 [cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b] 1 6 NotApplied: 5 (helm... )
-clustergroup.fleet.cattle.io/bobby 2 [cluster-93d18642-217a-486b-9a5d-be06762443b2 cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5] 2 12 NotApplied: 10 (helm... )
+```shell
+kubectl -n fleet-local get fleet
+```
+
+You should see something like this get created in your cluster.
+```
+kubectl get deploy frontend
+```
+```
+NAME READY UP-TO-DATE AVAILABLE AGE
+frontend 3/3 3 3 116m
```
-## Introduction
-
-Fleet is a Kubernetes cluster fleet controller specifically designed to address the challenges of running
-thousands to millions of clusters across the world. While it's designed for massive scale the concepts still
-apply for even small deployments of less than 10 clusters. Fleet is lightweight enough to run on the smallest of
-deployments too and even has merit in a single node cluster managing only itself. The primary use case of Fleet is
-to ensure that deployments are consistent across clusters. One can deploy applications or easily enforce standards
-such as "every cluster must have X security tool installed."
-
-Fleet has two simple high level concepts: cluster groups and bundles. Bundles are collections of resources that
-are deployed to clusters. Bundles are defined in the fleet controller and are then deployed to target cluster using
- selectors and per target customization. While bundles can be deployed to any cluster using powerful selectors,
- each cluster is a member of one cluster group. By looking at the status of bundles and cluster groups one can
- get a quick overview of that status of large deployments. After a bundle is deployed it is then constantly monitored
- to ensure that its Ready and resource have not been modified.
-
- A bundle can be plain Kubernetes YAML, Helm, or kustomize based. Helm and kustomize can be combined to create very
- powerful workflows too. Regardless of the approach chosen to create bundles all resources are deployed to a cluster as
- helm charts. Using Fleet to manage clusters means all your clusters are easily auditable because every resource is
- carefully managed in a chart and a simple `helm -n fleet-system ls` will give you an accurate overview of what is
- installed.
-
-Combining Fleet with a Git based workflow like Github Actions one can automate massive scale with ease.
-
-## Documentation
-
-1. [Understanding Bundles](./docs/bundles.md) - Very important read
-1. [Example Bundles](./docs/examples.md)
-1. [CLI](./docs/cli.md)
-1. [Architecture and Installation](./docs/install.md)
-1. [GitOps and CI/CD](./docs/gitops.md)
-
-## Quick Start
-
-1. Download `fleet` CLI from [releases](https://github.com/rancher/fleet/releases/latest).
- Or run
- ```bash
- curl -sfL https://raw.githubusercontent.com/rancher/fleet/master/install.sh | sh -
- ```
-
-2. Install Fleet Manager on Kubernetes cluster. The `fleet` CLI will use your current `kubectl` config
- to access the cluster.
- ```shell
- # Kubeconfig should point to CONTROLLER cluster
- fleet install manager | kubectl apply -f -
- ```
-3. Generate cluster group token to register clusters
- ```shell script
- # Kubeconfig should point to CONTROLLER cluster
- fleet install agent-token > token
- ```
-4. Apply token to clusters to register
- ```shell script
- # Kubeconfig should point to AGENT cluster
- kubectl apply -f token
- ```
-5. Deploy some bundles
- ```shell script
- # Kubeconfig should point to CONTROLLER cluster
- fleet apply ./examples/helm-kustomize
- ```
-6. Check status
- ```shell script
- kubectl get fleet
- ```
+Enjoy and read the [docs](https://fleet.rancher.io/).
diff --git a/charts/fleet/templates/rbac.yaml b/charts/fleet/templates/rbac.yaml
index 0604ebc9e6..59df51b1ff 100644
--- a/charts/fleet/templates/rbac.yaml
+++ b/charts/fleet/templates/rbac.yaml
@@ -26,16 +26,9 @@ rules:
- ""
resources:
- secrets
- verbs:
- - '*'
-- apiGroups:
- - ""
- resources:
- configmaps
verbs:
- - list
- - watch
- - get
+ - '*'
- apiGroups:
- rbac.authorization.k8s.io
resources:
diff --git a/docs/CNAME b/docs/CNAME
new file mode 100644
index 0000000000..fb5693e0d1
--- /dev/null
+++ b/docs/CNAME
@@ -0,0 +1 @@
+fleet.rancher.io
diff --git a/docs/agent-initiated.md b/docs/agent-initiated.md
new file mode 100644
index 0000000000..1467c24d24
--- /dev/null
+++ b/docs/agent-initiated.md
@@ -0,0 +1,71 @@
+# Agent Initiated
+
+Refer to the [overview page](./cluster-overview.md#agent-initiated-registration) for background information on the agent initiated registration style.
+
+## Cluster Registration Token and Client ID
+
+A downstream cluster is registered using two pieces of information: the **cluster registration token** and the **client ID**.
+
+The **cluster registration token** is a credential that will authorize the downstream cluster agent to be
+able to initiate the registration process. Refer to the [cluster registration token page](./cluster-tokens.md) for more information
+on how to create tokens and obtain the values. The cluster registration token is manifested as a `values.yaml` file that will
+be passed to the `helm install` process.
+
+The **client ID** is a unique string that will identify the cluster. This string is user generated and opaque to the Fleet manager and
+agent. It is only assumed to be sufficiently unique. For security reasons one should probably not be able to easily guess this value
+as then one cluster could impersonate another. The client ID is optional and if not specified the UID field of the `kube-system` namespace
+resource will be used as the client ID. Upon registration if the client ID is found on a `Cluster` resource in the Fleet manager it will associate
+the agent with that `Cluster`. If no `Cluster` resource is found with that client ID a new `Cluster` resource will be created with the specified
+client ID. Client IDs are mostly important such that when a cluster is registered it can immediately be identified, assigned labels, and git
+repos can be deployed to it.
+
+## Install agent for registration
+
+The Fleet agent is installed as a Helm chart. The only parameters to the helm chart installation should be the cluster registration token, which
+is represented by the `values.yaml` file and the client ID. The client ID is optional.
+
+First follow the [cluster registration token page](./cluster-tokens.md) to obtain the `values.yaml` file to be used.
+
+Second, set up your environment to use a client ID.
+
+```shell
+# If no client ID is going to be used then leave the value blank
+CLUSTER_CLIENT_ID="a-unique-value-for-this-cluster"
+```
+
+Finally, install the agent using Helm.
+
+!!! hint "Use proper namespace and release name"
+ For the agent chart the namespace must be `fleet-system` and the release name `fleet-agent`
+
+!!! hint "Ensure you are installing to the right cluster"
+ Helm will use the default context in `${HOME}/.kube/config` to deploy the agent. Use `--kubeconfig` and `--kube-context`
+ to change which cluster Helm is installing to.
+
+```shell
+helm -n fleet-system install --create-namespace --wait \
+ --set clientID="${CLUSTER_CLIENT_ID}" \
+ --values values.yaml \
+ fleet-agent https://github.com/rancher/fleet/releases/download/{{fleet.version}}/fleet-agent-{{fleet.helmversion}}.tgz
+```
+
+The agent should now be deployed. You can check the status of the fleet pods by running the below commands.
+
+```shell
+# Ensure kubectl is pointing to the right cluster
+kubectl -n fleet-system logs -l app=fleet-agent
+kubectl -n fleet-system get pods -l app=fleet-agent
+```
+
+Additionally you should see a new cluster registered in the Fleet manager. Below is an example of checking that a new cluster
+was registered in the `clusters` [namespace](./namespaces.md). Please ensure your `${HOME}/.kube/config` is pointed to the Fleet
+manager to run this command.
+
+```shell
+kubectl -n clusters get clusters.fleet.cattle.io
+```
+```
+NAME BUNDLES-READY NODES-READY SAMPLE-NODE LAST-SEEN STATUS
+cluster-ab13e54400f1 1/1 1/1 k3d-cluster2-server-0 2020-08-31T19:23:10Z
+```
+
diff --git a/docs/architecture.md b/docs/architecture.md
new file mode 100644
index 0000000000..311ebe6f77
--- /dev/null
+++ b/docs/architecture.md
@@ -0,0 +1,45 @@
+# Architecture
+
+![](./arch.png)
+
+Fleet has two primary components. The Fleet manager and the cluster agents. These
+components work in a two-stage pull model. The Fleet manager will pull from git and the
+cluster agents will pull from the Fleet manager.
+
+## Fleet Manager
+
+The Fleet manager is a set of Kubernetes controllers running in any standard Kubernetes
+cluster. The only API exposed by the Fleet manager is the Kubernetes API; there is no
+custom API for the fleet controller.
+
+## Cluster Agents
+
+One cluster agent runs in each cluster and is responsible for talking to the Fleet manager.
+The only communication from cluster to Fleet manager is by this agent and all communication
+goes from the managed cluster to the Fleet manager. The fleet manager does not initiate
+connections to downstream clusters. This means managed clusters can run in private networks and behind
+NATs. The only requirement is the cluster agent needs to be able to communicate with the
+Kubernetes API of the cluster running the Fleet manager. The one exception to this is if you use
+the [manager initiated](./manager-initiated.md) cluster registration flow. This is not required, but
+an optional pattern.
+
+The cluster agents are not assumed to have an "always on" connection. They will resume operation as
+soon as they can connect. Future enhancements will probably add the ability to schedule times of when
+the agent checks in, as it stands right now they will always attempt to connect.
+
+## Security
+
+The Fleet manager dynamically creates service accounts, manages their RBAC, and then gives the
+tokens to the downstream clusters. Clusters are registered by optionally expiring cluster registration tokens.
+The cluster registration token is used only during the registration process to generate a credential specific
+to that cluster. After the cluster credential is established the cluster "forgets" the cluster registration
+ token.
+
+The service accounts given to the clusters only have privileges to list `BundleDeployment` in the namespace created
+specifically for that cluster. It can also update the `status` subresource of `BundleDeployment` and the `status`
+subresource of its `Cluster` resource.
+
+## Scalability
+
+Fleet is designed to scale up to 1 million clusters. There are more details to come here on how we expect to scale
+a Kubernetes controller based architecture to 100's of millions of objects and beyond.
\ No newline at end of file
diff --git a/docs/assets/logo.svg b/docs/assets/logo.svg
new file mode 100644
index 0000000000..07fc4af5bb
--- /dev/null
+++ b/docs/assets/logo.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/docs/bundleflow.drawio b/docs/bundleflow.drawio
deleted file mode 100644
index 8e4b9899d6..0000000000
--- a/docs/bundleflow.drawio
+++ /dev/null
@@ -1 +0,0 @@
-3Vhtb9owEP41+dgqLwTaj5R2q7RVqla1a/fNkEvi1slljoGwXz87cRICCQoDVDSEhP34cj77Xp4LhjOJsq+cJOEDesAM2/Qyw7k1bNsa2Lahvqa3KpBra1QAAaeeFqqBJ/oHNGhqdE49SBuCApEJmjTBGcYxzEQDI5zjsinmI2vumpAAtoCnGWHb6E/qibBAr1yzxu+BBmG5s2XqlYiUwhpIQ+Lhcg1y7gxnwhFFMYqyCTB1eeW9FM996VitDOMQiz4P/Hp+B3OIL37yYwCT59eX13R2MdC2iVV5YPDk+fUUuQgxwJiwuxq94TiPPVBaTTmrZb4jJhK0JPgOQqy0M8lcoIRCETG9ChkVr2vjN6Xq0tWz20xrzicrPSnsVMZ1Hl9DKc75DHacuQwjwgMQO+TsykkyugEjEHwln+PAiKCLph1Eh1lQydWekAPtjD0cMyz0Lgib650Me8ikuTc+ygOvu2z4e47lwkWaX/pYClhuktWLchSo37GnXKdVScsKbcXaViwIyETTdYTRIJbjmbx14BJYABdUpstYL0TU84ooAWkKmeaqlAMTpLHIb8m9MdxbiTAyBXZDZh9BHlETZMjzfR0//1QWqT0gM1rSXeuvk2w9HroDftupWvuFeWk7Q50UvR2t1T2qE66JoO+nMsA2I6Ha9d+Dw+oMDo8u+sSGvKiW2Ng3wtq1fJtPgccgZNXuCrMKzs1tomdwggcSUx9SUR1gylts73OkjXxahlTAU0Ly8rSUhLmRXGlSUJhPM1Vfu8N/q+x1RnTFR5p/HUfPlzWbWSXZhk0mO1FpG30m59Q887ZOM6fmHLsn5zgHck7+6JhzsloT0KV3u1LpGHFHGzFy5TY7if3k5aCw4Kg17+rQmmcNj1Ex2rU8YipOWe1OajvHGaTpjq6gV237D3uF0c7KKluFwWDQSATrsMahbEKs6zatp28r7HNuK+6BRQfz8dmcZhISvm/FOLCZ8CiXAEWVeZKvFPkfo8FwLPfcGgzn4Henrs42FRgpoZ7vT5/Q77nO2fV7peKjv8tOOBCheMuUQW2bHkQkPuDdNhUcP6CklhhjRU0+ZWwD6s9rbf5vdrLHSEC7mYADt8XjZovH7f0dLqf1H1YF7dR/+zl3fwE=
\ No newline at end of file
diff --git a/docs/bundles.md b/docs/bundles.md
index cefe04ff5b..e30f1e3273 100644
--- a/docs/bundles.md
+++ b/docs/bundles.md
@@ -1,167 +1,186 @@
-Understanding Bundles
-=====================
-
-Bundles are a collection of resources that include additional deployment specific information and can be deployed
-to multiple clusters. Each bundle is one [custom resource](../pkg/apis/fleet.cattle.io/v1alpha1/bundle.go) that fully
-encapsulates all this information. Because the type can grow quite large and has various rules and customizations in
-it, the `fleet` CLI helps improve the user experience of managing this resources. Instead of managing one large YAML file
-that has nested YAML files in it, `fleet` allows you to represent your bundle on disk as a series of individual files and
-then running `fleet apply` will construct the bundle resource and deploy it to the fleet controller. Bundles go through a
-powerful process of how they are rendered into Kubernetes resources. This process allows one to choose a Kubernetes YAML,
-Helm or Kustomize based approach. All three of these approaches can be combined too. Combining helm with kustomize
-often ends up being a great way to manage third party applications. Using plain Kubernetes YAML is typically a more
-lightweight approach to manage internal applications.
-
-## Bundle Layout
+# Understanding Bundles
+
+Each registered `GitRepo` that is monitored can produce one or more [bundles](./concepts.md). The bundle is a
+collection of resources that contains resources that will be deployed to one or more clusters and can be customized
+per target. This means the structure of the monitored git repository is the same as the structure of a bundle or
+a directory structure of multiple bundles. The location of the bundles is specified in the `GitRepo` type with
+the `spec.bundleDirs` field. By default the value is `./`.
+
+## Directory structure
+
+The directory structure of a single bundle will look like the below depending on your use case.
```
-./bundle.yaml # Bundle descriptor
+./fleet.yaml # Bundle descriptor (optional)
./manifests/ # Directory for raw kubernetes YAML
./chart/ # Directory for an inline Helm Chart
./kustomize/ # Directory for kustomization resources
-./overlays/${OVERLAY_NAME} # Directory for kustomization resources
+./overlays/${OVERLAY_NAME} # Directory for customizing raw Kubernetes YAML resources
```
-This layout is used for on disk for the `fleet` command to read and is also the expected structure of embedded resources
-in the bundle custom resource.
+## Bundle Rendering
-## Bundle Strategies
+One can choose between using raw Kubernetes YAML, Helm, Kustomize, or some combination of the three. Regardless of
+ the strategy you use, you should
+understand all three approaches to understand the capabilities of the system. Refer to [examples](examples.md) for
+specific examples.
-One can choose between using plain Kubernetes YAML, Helm, Kustomize, or some combination of the three. In depth
-documentation on each approach can be found at the below links. Regardless of the strategy you use, you should
- understand all three approaches to understand the capabilities of the system. Refer to [examples](examples.md) for
- specific examples.
+### Render Pipeline
-## bundle.yaml
+![](bundleflow.png)
-The `bundle.yaml` on disk is the same format as the bundle `spec` field and just removes some
-annoying boilerplate. Below is a quick reference of every field with inline comments to how they
-are used.
+A bundle has three types of resources in it. Plain kubernetes manifests are available in `manifest/`. Chart files for
+a helm chart are available in `chart/`. Finally, kustomize files are in `kustomize/`. Each one of these directories
+and content types are optional but combined to create one helm chart that is to be deployed to the cluster. Each
+content type is optional, but regardless of what type of input is chosen the final asset is always a Helm chart.
+Helm is core to the [architecture](./architecture.md) of Fleet, but this does not mean you are required to author
+Helm charts yourself. You can choose a pure Kubernetes YAML or Kustomize approach.
-```yaml
-# Used to populate metadata.name in the Bundle custom resource
-name: mybundle
+#### Phase 1: Plain Kubernetes YAML
-# Used to populate metadata.labels in the Bundle custom resource. Currently there is no specific use of labels
-# by Fleet and is here just to allow the users to add additional metadata to bundles.
+Any resource that is found in `manifests/` will be copied to the target chart in the `chart/templates/` folder. This
+means these files can be plain YAML or have helm golang templating.
+
+#### Phase 2: Helm Chart generation
+
+The `chart/` folder is expected to have Helm chart content in it. If this folder is not found then a chart will
+be generated on demand. This means a `Chart.yaml` will be created for you if not found. Since content from
+`manifests/` is copied to `chart/templates`, one can deploy helm charts without knowing anything about helm, instead
+ using an approach closer to `kubectl apply`.
+
+#### Phase 3: Kustomize Post Process
+
+After the Helm chart from phase 2 is rendered, Fleet is called as a post renderer to run kustomize. The
+`kustomizeDir` field from the target or overlays can be used to determine which `kustomization.yaml` is invoked.
+The objects generated by Helm are put into a field named `${kustomizeDir}/manifests.yaml` and the `kustomization.yaml`
+found in `kustomizeDir` is dynamically modified to add `manifests.yaml` to the `resources:` list.
+
+## fleet.yaml
+
+A bundle is defined as optionally containing a `fleet.yaml` file at its root. Again, this file is optional, but
+for many multi-cluster use cases it is fairly essential. The structure of the `fleet.yaml` is essentially the
+same structure as the `spec` field of the `Bundle` custom resource definition. Below is a reference of all fields
+and how they are used. Depending on which style (single-cluster or multi-cluster) and renderer (raw YAML, Helm,
+or Kustomize) you are using the fields may or may not be applicable.
+
+```yaml
+# Used to populate metadata.labels in the Bundle custom resource. The labels of
+# a bundle are important if you wish to use the BundleNamespaceMapping approach
+# for common configuration management.
labels:
custom: value
-# Used to populate metadata.annotations in the Bundle custom resource. Currently there is no specific use of annotations
-# by Fleet and is here just to allow the users to add additional metadata to bundles.
+# Used to populate metadata.annotations in the Bundle custom resource. Currently
+# there is no specific use of annotations by Fleet and is here just to allow the
+# users to add additional metadata to bundles.
annotations:
custom: value
-# Use a custom folder for plain Kubernetes YAML files. This can also refer to a URL to download
-# resource from. This uses Hashicorp's go-getter, so any support source (http, git, S3) should work.
+# Use a custom folder for plain Kubernetes YAML files. This can also refer to a
+# URL to download resource from. This uses Hashicorp's go-getter, so any support
+# source (http, git, S3) should work.
# Default: manifests
manifestsDir: ./manifests
-# Use a custom folder for kustomize resources. This can also refer to a URL to download resource from, similar to
-# the manifestDir field
+# Use a custom folder for kustomize resources. This can also refer to a URL to
+# download resource from, similar to the manifestDir field
# Default: kustomize
kustomizeDir: ./kustomize
-# Use a custom source for chart resources. This is commonly a URL pointing to the chart tgz file. Similar to the
-# the manifestDir field any go-getter URL is supported.
+# Use a custom source for chart resources. This is commonly a URL pointing to
+# the chart tgz file. Similar to the the manifestDir field any go-getter URL
+# is supported.
# Default: chart
chart: ./chart
-# The default namespace to be applied to resources. This field is not used to enforce or lock down the deployment
-# a specific namespace. It is only used as a default when a namespaced resource does not specify a namespace.
+# The default namespace to be applied to resources. This field is not used to
+# enforce or lock down the deployment to a specific namespace, but instead
+# provide the default value of the namespace field if one is not specified
+# in the manifests. If you wish to actually restrict the namespace use then
+# that should be done using the RBAC of the service account assigned to the
+# GitRepo
# Default: default
-defaultNamespace: default
+namespace: default
-# When resources are applied the system will wait for the resources to initially become Ready. If the resources are
-# not ready in this timeframe the application of resources fails and the bundle will stay in a NotApplied state.
+# When resources are applied the system will wait for the resources to initially
+# become Ready. If the resources are not ready in this time frame the
+# application of resources fails and the bundle will stay in a NotApplied state.
# Default: 600 (10 minutes)
timeoutSeconds: 600
-# Default values to be based to Helm upon installation.
+# Default values to be passed to Helm upon installation. The structure of this
+# field is the same structure that would be in the values.yaml file.
# Default: null
values:
image: custom/value:latest
-# A paused bundle will not update downstream clusters but instead mark the bundle as OutOfSync
+# A paused bundle will not update downstream clusters but instead mark the bundle
+# as OutOfSync. One can then manually confirm that a bundle should be deployed to
+# the downstream clusters.
# Default: false
paused: false
rolloutStrategy:
- # A number or percentage of clusters that can be unavailable during an update of a bundle. This follows the same
- # basic approach as a deployment rollout strategy
+ # A number or percentage of clusters that can be unavailable during an update
+ # of a bundle. This follows the same basic approach as a deployment rollout
+ # strategy.
+ # default: 10%
maxUnavailable: 15%
-
-# Base resources for this bundle. All targets will inherit this content. The content is typically not manually
-# managed but instead populated by the fleet CLI. The name fields should be paths relative to the bundle root. For
-# example, name: chart/Chart.yaml should be used if you are embedding a chart. If it does not have the chart/ prefix
-# it will not be recognized as a chart. The fleet CLI will read the directories specified by manifestsDir, kustomizeDir,
-# and chart, strip the custom prefix, and normalize the paths to manifests/, kustomize/, chart/ respectively. The
-# paths of resources must match the bundle layout specified above.
-resources:
-# The name of this resource. If you do not put a resources it will get an auto generated name of the format
-# manifests/file000
-- name: chart/chart.yaml
- # the encoding of the value field. If this value is blank it is assumed to be valid UTF-8 content. Other supported
- # values are:
- # base64 - base64'd content
- # base64+gz - gzip and then base64'd content
- # default:
- encoding:
- # The content of this resource following the encoding set above
- content: |
- name: chartname
- version: v0.1
-
-# Overlays contain customization to resources and options that can be references by targets to compose a specific
-# configuration for a target
-overlays:
-# The name of the overlay. This field is referenced by the targets.
-- name: custom1
- # Overlays can reference other overlays. The referenced overlays will be applied before this overlay.
- overlays:
- - custom2
- - custom3
- # Override defaultNamespace
- defaultNamespace: newvalue
- # Override the base dir where the kustomization.yaml is found
- kustomizedDir: production/
- # Override the timeoutSeconds parameter
- timeoutSeconds: 5
- # Merge in new values used by Helm. The merge logic follows the logic of how Helm merges values, which is basically
- # just a map merge and list are overwritten.
- values:
- custom: value
- # Resources to overwrite or patch. The fleet command will populate these resources with the contents of ./overlays/${NAME}
- # The names here are relative to the ./overlays/${NAME} root, not the bundle root. The names here should match the
- # name specified in the base of the bundle.
- resources:
- - name: manifests/deployment.yaml
- content: |
- kind: Deployment
- ....
-
-# Targets are used to match clusters that should be deployed to. Each target can specify a series of overlays to apply
-# customizations for that cluster. Targets are evaluated in order and the first one to match is used
+ # A number or percentage of cluster partitions that can be unavailable during
+ # an update of a bundle.
+ # default: 0
+ maxUnavailablePartitions: 20%
+ # A number or percentage of how to automatically partition clusters if no
+ # specific partitioning strategy is configured.
+ # default: 25%
+ autoPartitionSize: 10%
+ # A list of definitions of partitions. If any target clusters do not match
+ # the configuration they are added to partitions at the end following the
+ # autoPartitionSize.
+ partitions:
+ # A user-friendly name given to the partition, used for display (optional).
+ # default: ""
+ - name: canary
+ # A number or percentage of clusters that can be unavailable in this
+ # partition before this partition is treated as done.
+ # default: 10%
+ maxUnavailable: 10%
+ # Selector matching cluster labels to include in this partition
+ clusterSelector:
+ matchLabels:
+ env: prod
+ # A cluster group name to include in this partition
+ clusterGroup: agroup
+ # Selector matching cluster group labels to include in this partition
+ clusterGroupSelector: agroup
+
+# Targets are used to match clusters that resources should be configured for.
+# Each target can specify a series of overlays to apply customizations for
+# that cluster. Targets are evaluated in order and the first one to match is used
targets:
-# The name of target. If not specified a default name of the format "target000" will be used
+# The name of target. If not specified a default name of the format "target000"
+# will be used
- name: prod
- # Override defaultNamespace
- defaultNamespace: newvalue
+ # Override namespace
+ namespace: newvalue
# Override the base dir where the kustomization.yaml is found
+ # Please note this directory is relative to ./kustomize
kustomizedDir: production/
# Override the timeoutSeconds parameter
timeoutSeconds: 5
- # Merge in new values used by Helm. The merge logic follows the logic of how Helm merges values, which is basically
- # just a map merge and list are overwritten.
+ # Merge in new values used by Helm. The merge logic follows the logic of how Helm
+ # merges values, which is basically just a map merge and list are overwritten.
values:
custom: value
- # Overlays to be applied on this target in the specified order.
+ # Overlays to be applied on this target in the specified order. The names
+ # of the overlays correspond to the directory names in the ./overlays folder.
overlays:
- custom2
- custom3
- # A selector used to match clusters. The structure is the standard metav1.LabelSelector format.
- # If clusterGroupSelector or clusterGroup is specified, clusterSelector will be used only to further refine the
- # selection after clusterGroupSelector and clusterGroup is evaluated.
+ # A selector used to match clusters. The structure is the standard
+ # metav1.LabelSelector format. If clusterGroupSelector or clusterGroup is specified,
+ # clusterSelector will be used only to further refine the selection after
+ # clusterGroupSelector and clusterGroup is evaluated.
clusterSelector:
matchLabels:
env: prod
@@ -175,15 +194,20 @@ targets:
## Target Matching
-All clusters in all cluster groups in the same namespace as the bundles will be evaluated against all bundle targets.
+All clusters and cluster groups in the same namespace as the `GitRepo`/`Bundle` will be evaluated against all bundle targets.
The targets list is evaluated one by one and the first target that matches is used for that bundle for that cluster. If
-no match is made, the bundle will not be deployed to the cluster. There are three approaches to matching clusters.
+no match is made, then no customizations will be applied. The superset of all valid targets for a bundle is
+set in the definition of the `GitRepo`. The `target` definitions in the bundle are only used to decide what configuration
+to apply to the target, whereas the actual matching of whether a cluster should be deployed to or not is determined
+by the definition of the `GitRepo`.
+
+There are three approaches to matching clusters.
One can use cluster selectors, cluster group selectors, or an explicit cluster group name. All criteria is additive so
the final match is evaluated as "clusterSelector && clusterGroupSelector && clusterGroup". If any of the three have the
default value it is dropped from the criteria. The default value is either null or "". It is important to realize
that the value `{}` for a selector means "match everything."
-```shell script
+```yaml
# Match everything
clusterSelector: {}
# Selector ignored
@@ -193,11 +217,11 @@ clusterSelector: null
## Resource Overlays and Patching
A target references a series of overlays and those overlay can have resources in them. The resource overlay content
-using a file based approach. This is different from kustomize which uses a resource based approach. In kustomize
+uses a file name based approach. This is different from kustomize which uses a resource based approach. In kustomize
the resource Group, Kind, Version, Name, and Namespace identify resources and are then merged or patched. For Fleet
-the overlay resources will override or patch content based on the file name.
+the overlay resources will override or patch content with a matching file name.
-```shell script
+```shell
# Base files
manifests/deployment.yaml
manifests/svc.yaml
@@ -216,33 +240,4 @@ will be replaced with `.` from the file name and that will be used as the target
will target `deployment.yaml`. The patch will be applied using JSON Merge, Strategic Merge Patch, or JSON Patch.
Which strategy is used is based on the file content. Even though JSON strategies are used, the files can be written
using YAML syntax.
-
-## Render Pipeline
-
-![](bundleflow.png)
-
-A bundle has three types of resources in it. Plain kubernetes manifests are available in `manifest/`. Chart files for
-a helm chart are available in `chart/`. Finally, kustomize files are in `kustomize/`. Each one of these directories
-and content types are optional but combined to create one helm chart that is to be deployed to the cluster. Since
-each content type is optional, regardless of the fact that the final asset is a Helm chart, a pure Kubernetes YAML or
-kustomize approach is possible.
-
-### Phase 1: Plain Kubernetes YAML
-
-Any resource that is found in `manifests/` will be copied to the target chart in the `chart/templates/` folder. This
-means these files can be plain YAML or have helm golang templating.
-
-### Phase 2: Helm Chart generation
-
-The `chart/` folder is expected to have Helm chart content in it. If this folder is not found then a chart will
-be generated on demand. This means a `Chart.yaml` will be created for you if not found. Since content from
-`manifests/` is copied to `chart/templates`, one can deploy helm charts without knowning anything about helm, instead
- using an approach closer to `kubectl apply`.
-
- ### Phase 3: Kustomize Post Process
-
- After the Helm chart from phase 2 is rendered, Fleet is called as a post renderer to apply run kustomize. The
-`kustomizeDir` field from the target or overlays can be used to determine which `kustomization.yaml` is invoked.
-The objects generated by Helm are put into a field named `${kustomizeDir}/manifests.yaml` and the `kustomization.yaml`
-found in `kustomizeDir` is dynamically modified to add `manifests.yaml` to the `resources:` list.
diff --git a/docs/cli.md b/docs/cli.md
deleted file mode 100644
index 40185c367e..0000000000
--- a/docs/cli.md
+++ /dev/null
@@ -1,64 +0,0 @@
-fleet
-===
-
-Fleet is exposed as a pure Kubernetes API using Custom Resources. The `fleet` is used
-only as a way to enhance the experience of interacting with the Bundle custom resources.
-
-## fleet apply [BUNDLE_DIR...]
-
-The apply command will render a bundle resource and then apply it to the cluster. The
-`-o` flag can be used to not apply the resulting YAML but instead save it to a file
-or standard out (`-`).
-
-```
-Render a bundle into a Kubernetes resource and apply it in the Fleet Manager
-
-Usage:
- fleet apply [flags]
-
-Flags:
- -b, --bundle-file string Location of the bundle.yaml
- -c, --compress Force all resources to be compress
- -f, --file string Read full bundle contents from file
- -h, --help help for apply
- -o, --output string Output contents to file or - for stdout
-
-Global Flags:
- -k, --kubeconfig string kubeconfig for authentication
- -n, --namespace string namespace (default "default")
-```
-
-## fleet test [BUNDLE_DIR]
-
-The test command is used to simulate matching clusters and rendering the output. The
-entire bundle pipeline will be executed. This means helm and kustomize will be evaluated.
-For helm, this is the equivalent of running `helm template` with the same caveauts. That
-being that anything that dynamically looks at the cluster will not be proper. In general
-this type of logic should be avoided in most cases.
-
-```
-Match a bundle to a target and render the output
-
-Usage:
- fleet test [flags]
-
-Flags:
- -b, --bundle-file string Location of the bundle.yaml
- -g, --group string Cluster group to match against
- -L, --group-label strings Cluster group labels to match against
- -h, --help help for test
- -l, --label strings Cluster labels to match against
- --print-bundle Don't run match and just output the generated bundle
- -q, --quiet Just print the match and don't print the resources
- -t, --target string Explicit target to match
-
-Global Flags:
- -k, --kubeconfig string kubeconfig for authentication
- -n, --namespace string namespace (default "default")
-
-```
-
-## fleet install ...
-
-The install command is for installing the fleet controller and registering clusters
-with Fleet. This command is covered in detail in the [installation documentation](./install.md).
diff --git a/docs/cluster-group.md b/docs/cluster-group.md
new file mode 100644
index 0000000000..2a47d76503
--- /dev/null
+++ b/docs/cluster-group.md
@@ -0,0 +1,22 @@
+# Cluster Groups
+
+Clusters in a namespace can be put into a cluster group. A cluster group is essentially a named selector,
+and the selector is its only parameter.
+When you get to a certain scale the only reasonable way to manage clusters is by defining cluster groups.
+Cluster groups serve the purpose of giving aggregated
+status of the deployments and also a simpler way to manage targets.
+
+A cluster group is created by creating a `ClusterGroup` resource like the one below.
+
+```yaml
+kind: ClusterGroup
+apiVersion: {{fleet.apiVersion}}
+metadata:
+ name: production-group
+ namespace: clusters
+spec:
+ # This is the standard metav1.LabelSelector format to match clusters by labels
+ selector:
+ matchLabels:
+ env: prod
+```
diff --git a/docs/cluster-overview.md b/docs/cluster-overview.md
new file mode 100644
index 0000000000..458d6f81d6
--- /dev/null
+++ b/docs/cluster-overview.md
@@ -0,0 +1,25 @@
+# Overview
+
+There are two specific styles to registering clusters. These styles will be referred
+to as **agent initiated** and **manager initiated** registration. Typically one would
+go with the agent initiated registration but there are specific use cases in which
+manager initiated is a better workflow.
+
+## Agent Initiated Registration
+Agent initiated refers to a pattern in which the downstream cluster installs an agent with a
+[cluster registration token](./cluster-tokens.md) and optionally a client ID. The cluster
+agent will then make an API request to the Fleet manager and initiate the registration process. Using
+this process the Manager will never make an outbound API request to the downstream clusters and will thus
+never need to have direct network access. The downstream cluster only needs to make outbound HTTPS
+calls to the manager.
+
+## Manager Initiated Registration
+
+Manager initiated registration is a process in which you register an existing Kubernetes cluster
+with the Fleet manager and the Fleet manager will make an API call to the downstream cluster to
+deploy the agent. This style can place additional network access requirements because the Fleet
+manager must be able to communicate with the downstream cluster API server for the registration process.
+After the cluster is registered there is no further need for the manager to contact the downstream
+cluster API. This style is more compatible if you wish to manage the creation of all your Kubernetes
+clusters through GitOps using something like [cluster-api](https://github.com/kubernetes-sigs/cluster-api)
+or [Rancher](https://github.com/rancher/rancher).
\ No newline at end of file
diff --git a/docs/cluster-tokens.md b/docs/cluster-tokens.md
new file mode 100644
index 0000000000..dacec46cc7
--- /dev/null
+++ b/docs/cluster-tokens.md
@@ -0,0 +1,55 @@
+# Cluster Registration Tokens
+
+!!! hint "Unneeded for Manager initiated registration"
+ For manager initiated registrations the token is managed by the Fleet manager and does
+    not need to be manually created and obtained.
+
+For an agent initiated registration the downstream cluster must have a cluster registration token.
+Cluster registration tokens are used to establish a new identity for a cluster. Internally
+cluster registration tokens are managed by creating Kubernetes service accounts that have the
+permissions to create `ClusterRegistrationRequest`s within a specific namespace. Once the
+cluster is registered a new `ServiceAccount` is created for that cluster that is used as
+the unique identity of the cluster. The agent is designed to forget the cluster registration
+token after registration. While the agent will not maintain a reference to the cluster registration
+token after a successful registration, please note that other system bootstrap scripts usually do.
+
+Since the cluster registration token is forgotten, if you need to re-register a cluster you must
+give the cluster a new registration token.
+
+## Token TTL
+
+Cluster registration tokens can be reused by any cluster in a namespace. The tokens can be given a TTL
+such that it will expire after a specific time.
+
+## Create a new Token
+
+First you must understand how [namespaces](./namespaces.md) are used in the Fleet manager as the
+`ClusterRegistrationToken` is a namespaced type.
+The cluster registration tokens are managed with the `ClusterRegistrationToken` type. Create a new
+token with the below YAML.
+
+```yaml
+kind: ClusterRegistrationToken
+apiVersion: "{{fleet.apiVersion}}"
+metadata:
+ name: new-token
+ namespace: clusters
+spec:
+ # The number of seconds this token is valid after creation. A value <= 0 means infinite time.
+ ttlSeconds: 604800
+```
+
+## Obtaining Token Value (Agent values.yaml)
+
+The token value is the contents of a `values.yaml` file that is expected to be passed to `helm install`
+to install the Fleet agent on a downstream cluster. The token is stored in a Kubernetes secret referenced
+by the `status.secretName` field on the newly created `ClusterRegistrationToken`. In practice the secret
+name is always the same as the `ClusterRegistrationToken` name. The contents will be in
+the secret's data key `values`. To obtain the `values.yaml` content for the above example YAML one can
+run the following one-liner.
+
+```shell
+kubectl -n clusters get secret new-token -o 'jsonpath={.data.values}' | base64 -d > values.yaml
+```
+
+This `values.yaml` file can now be used repeatedly by clusters to register until the TTL expires.
diff --git a/docs/concepts.md b/docs/concepts.md
new file mode 100644
index 0000000000..5527eb78bc
--- /dev/null
+++ b/docs/concepts.md
@@ -0,0 +1,27 @@
+# Core Concepts
+
+Fleet is fundamentally a set of Kubernetes custom resource definitions (CRDs) and controllers
+to manage GitOps for a single Kubernetes cluster or large scale deployments of Kubernetes clusters
+(up to one million). Below are some of the concepts of Fleet that will be useful throughout this documentation.
+
+* **Fleet Manager**: The centralized component that orchestrates the deployments of Kubernetes assets
+ from git. In a multi-cluster setup this will typically be a dedicated Kubernetes cluster. In a
+ single cluster setup the Fleet manager will be running on the same cluster you are managing with GitOps.
+* **Fleet controller**: The controller(s) running on the Fleet manager orchestrating GitOps. In practice
+  Fleet manager and Fleet controllers are used fairly interchangeably.
+* **Single Cluster Style**: This is a style of installing Fleet in which the manager and downstream cluster are the
+ same cluster. This is a very simple pattern to quickly get up and running with GitOps.
+* **Multi Cluster Style**: This is a style of running Fleet in which you have a central manager that manages a large
+ number of downstream clusters.
+* **Fleet agent**: Every managed downstream cluster will run an agent that communicates back to the Fleet manager.
+ This agent is just another set of Kubernetes controllers running in the downstream cluster.
+* **GitRepo**: Git repositories that are watched by Fleet are represented by the type `GitRepo`.
+* **Bundle**: When a `GitRepo` is scanned it will produce one or more bundles. Bundles are a collection of
+ resources that get deployed to a cluster. `Bundle` is the fundamental deployment unit used in Fleet. The
+ contents of a `Bundle` may be Kubernetes manifests, Kustomize configuration, or Helm charts.
+* **BundleDeployment**: When a `Bundle` is deployed to a cluster an instance of a `Bundle` is called a `BundleDeployment`.
+  A `BundleDeployment` represents the state of that `Bundle` on a specific cluster with its cluster-specific
+ customizations.
+* **Downstream Cluster**: Clusters to which Fleet deploys manifests are referred to as downstream clusters. In the single
+ cluster use case the Fleet manager Kubernetes cluster is both the manager and downstream cluster at the same time.
+* **Cluster Registration Token**: Tokens used by agents to register a new cluster.
\ No newline at end of file
diff --git a/docs/examples.md b/docs/examples.md
index dadf5d674c..c93beea2bd 100644
--- a/docs/examples.md
+++ b/docs/examples.md
@@ -1,33 +1,4 @@
-Examples
-========
+# Examples
-### Pure Kubernetes YAML
-
-This example uses only Kubernetes YAML that you'd typically see with `kubectl apply`.
-
-[Pure YAML Example](../examples/yaml)
-
-### Helm w/ Embedded Chart
-
-This example shows how to use a Helm chart that is defined locally in the bundle.
-
-[Helm Local Example](../examples/helm-local)
-
-### Helm External Chart
-
-This example shows how to use a chart hosted in a repo from an external source.
-This is the most common approach with third party charts.
-
-[Helm Download Example](../examples/helm-download)
-
-### Kustomize
-
-This example shows how to use a pure kustomize approach to deploy bundles
-
-[Kustomize Example](../examples/kustomize)
-
-### Helm w/ Kustomize Post Processing
-
-This example shows how to kustomize a helm chart
-
-[Helm Kustomize Example](../examples/helm-kustomize)
+Examples using raw Kubernetes YAML, Helm charts, Kustomize and combinations
+of the three are in the [Fleet Examples repo](https://github.com/rancher/fleet-examples/).
diff --git a/docs/gitops.md b/docs/gitops.md
deleted file mode 100644
index 62280536ac..0000000000
--- a/docs/gitops.md
+++ /dev/null
@@ -1,30 +0,0 @@
-GitOps and CI/CD
-================
-
-Fleet is designed to be used in a CD or GitOps pipeline. Because Fleet is a standard
-Kubernetes API it should integrate well in the existing ecosystem. One can use a
-tool such as ArgoCD or Flux in the fleet controller cluster to copy resources from Git to
-the fleet controller.
-
-Often a more traditional CI approach is much easier than running ArgoCD or Flux. For traditional CI
-one just needs to run `fleet test` and `fleet apply` as a part of the CI process. An example doing this with GitHub Actions
-is below.
-
-GitOps Patterns
-===============
-
-There are two scenarios to consider for GitOps. First is managing the resources in the fleet controller itself so that
-it can then manage clusters. The reason you do this as opposed to going directly to the clusters is that intention
-of the fleet controller is that as you add/delete clusters the clusters can immediately assume the configuration they are
-supposed to. Also fleet controller will roll out deployments in a way not easily possible with GitOps.
-
-The second scenario to consider is using fleet controller to define the GitOps pipelines that run in a cluster. You can
-use fleet controller to define the pipelines and then once the pipeline is established it goes directly to the cluster not
-through the fleet controller.
-
-GitHub Actions Example
-======================
-
-GitHub Actions combined with Fleet provides a very simple yet very powerful GitOps model. An example of how to use Fleet
-with Github Actions can be found [here](https://github.com/StrongMonkey/fleet-cd-example). The pattern used in this
-example can be very easily duplicated in any CI system.
diff --git a/docs/gitrepo-add.md b/docs/gitrepo-add.md
new file mode 100644
index 0000000000..a620c925be
--- /dev/null
+++ b/docs/gitrepo-add.md
@@ -0,0 +1,69 @@
+# Registering
+
+## Proper namespace
+Git repos are added to the Fleet manager using the `GitRepo` custom resource type. The
+`GitRepo` type is namespaced. If you are using Fleet in a [single cluster](./concepts.md)
+style the namespace will always be **fleet-local**. For a [multi-cluster](./concepts.md) style
+please ensure you use the correct namespace that will map to the right target clusters.
+
+## Create GitRepo instance
+
+Git repositories are registered by creating a `GitRepo` following the below YAML sample. Refer
+to the inline comments for the meaning of each field.
+
+```yaml
+kind: GitRepo
+apiVersion: {{fleet.apiVersion}}
+metadata:
+ # Any name can be used here, the created bundles will start with this name
+ name: my-repo
+ # For single cluster use fleet-local, otherwise use the namespace of
+ # your choosing
+ namespace: fleet-local
+spec:
+ # This can be a HTTPS or git URL. If you are using a git URL then
+ # clientSecretName will probably need to be set to supply a credential.
+ # repo is the only required parameter for a repo to be monitored.
+ #
+ repo: https://github.com/rancher/fleet-examples
+
+ # Any branch can be watched, this field is optional. If not specified the
+ # branch is assumed to be master
+ #
+ # branch: master
+
+ # A specific commit or tag can also be watched.
+ #
+ # revision: v0.3.0
+
+ # For a private registry you must supply a clientSecretName. A default
+ # secret can be set at the namespace level using the BundleRestriction
+ # type. Secrets must be of the type "kubernetes.io/ssh-auth" or
+ # "kubernetes.io/basic-auth". The secret is assumed to be in the
+ # same namespace as the GitRepo
+ #
+ # clientSecretName: my-ssh-key
+
+ # A git repo can produce multiple bundles or maybe your bundle
+ # is not at the root of the git repo. The below field is expected
+ # to be comma separated and supports path globbing (ex: some/*/path)
+ #
+ # Example:
+ # bundleDirs: single-path,multiple-paths/*
+ bundleDirs: simple
+
+ # The service account that will be used to perform this deployment.
+ # This is the name of the service account that exists in the
+ # downstream cluster in the fleet-system namespace. It is assumed
+  # this service account already exists so it should be created
+  # beforehand, most likely coming from another git repo registered with
+ # the Fleet manager.
+ #
+ # serviceAccount: moreSecureAccountThanClusterAdmin
+
+ # Target clusters to deploy to if running Fleet in a multi-cluster
+ # style. Refer to the "Mapping to Downstream Clusters" docs for
+ # more information.
+ #
+ # targets: ...
+```
diff --git a/docs/gitrepo-rm.md b/docs/gitrepo-rm.md
new file mode 100644
index 0000000000..7fcadcb000
--- /dev/null
+++ b/docs/gitrepo-rm.md
@@ -0,0 +1,9 @@
+# Removing
+
+If you delete a `GitRepo` from the Fleet Manager the `Bundles` created by the git repo are not automatically removed.
+This is to prevent accidentally deleting software from clusters by just modifying the git repos. To fully remove
+the deployed software just delete the corresponding bundles too. This can be done by running
+
+```shell
+kubectl -n "${REPO_NAMESPACE}" delete bundles.fleet.cattle.io -l fleet.cattle.io/repo-name="${REPO_NAME}"
+```
\ No newline at end of file
diff --git a/docs/gitrepo-structure.md b/docs/gitrepo-structure.md
new file mode 100644
index 0000000000..05166ac03c
--- /dev/null
+++ b/docs/gitrepo-structure.md
@@ -0,0 +1,17 @@
+# Expected Repo Structure
+
+A registered git repository should have the following structure
+
+```
+./fleet.yaml # Bundle descriptor (optional)
+./manifests/ # Directory for raw kubernetes YAML (if used)
+./chart/ # Directory for an inline Helm Chart (if used)
+./kustomize/ # Directory for kustomization resources (if used)
+./overlays/${OVERLAY_NAME} # Directory for customizing raw Kubernetes YAML resources (if used)
+```
+
+These directories can be configured to different paths using the `fleet.yaml` file. Refer to
+the [bundle reference](./bundles.md) documentation on how to customize the behavior.
+
+Also refer to the [examples](./examples.md) to learn how to use raw YAML, Helm, and Kustomize and
+how to customize deployments to specific clusters.
\ No newline at end of file
diff --git a/docs/gitrepo-targets.md b/docs/gitrepo-targets.md
new file mode 100644
index 0000000000..6128b23053
--- /dev/null
+++ b/docs/gitrepo-targets.md
@@ -0,0 +1,74 @@
+# Mapping to Downstream Clusters
+
+!!! hint "Multi-cluster Only"
+ This approach only applies if you are running Fleet in a multi-cluster style
+
+When deploying `GitRepos` to downstream clusters the clusters must be mapped to a target.
+
+## Defining targets
+
+The deployment targets of `GitRepo` is done using the `spec.targets` field to
+match clusters or cluster groups. The YAML specification is as below.
+
+```yaml
+kind: GitRepo
+apiVersion: {{fleet.apiVersion}}
+metadata:
+ name: myrepo
+ namespace: clusters
+spec:
+ repo: http://github.com/rancher/fleet-examples
+ bundleDirs: simple
+
+ # Targets are evaluated in order and the first one to match is used. If
+ # no targets match then the evaluated cluster will not be deployed to.
+ targets:
+ # The name of target. If not specified a default name of the format "target000"
+ # will be used
+ - name: prod
+ # A selector used to match clusters. The structure is the standard
+ # metav1.LabelSelector format. If clusterGroupSelector or clusterGroup is specified,
+ # clusterSelector will be used only to further refine the selection after
+ # clusterGroupSelector and clusterGroup is evaluated.
+ clusterSelector:
+ matchLabels:
+ env: prod
+ # A selector used to match cluster groups.
+ clusterGroupSelector:
+ matchLabels:
+ region: us-east
+ # A specific clusterGroup by name that will be selected
+ clusterGroup: group1
+```
+
+## Target Matching
+
+All clusters and cluster groups in the same namespace as the `GitRepo` will be evaluated against all targets.
+If any of the targets match the cluster then the `GitRepo` will be deployed to the downstream cluster. If
+no match is made, then the `GitRepo` will not be deployed to that cluster.
+
+There are three approaches to matching clusters.
+One can use cluster selectors, cluster group selectors, or an explicit cluster group name. All criteria is additive so
+the final match is evaluated as "clusterSelector && clusterGroupSelector && clusterGroup". If any of the three have the
+default value it is dropped from the criteria. The default value is either null or "". It is important to realize
+that the value `{}` for a selector means "match everything."
+
+```yaml
+# Match everything
+clusterSelector: {}
+# Selector ignored
+clusterSelector: null
+```
+
+## Default target
+
+If no target is set for the `GitRepo` then the default targets value is applied. The default targets value is as below.
+
+```yaml
+targets:
+- name: default
+ clusterGroup: default
+```
+
+This means if you wish to set up a default location that non-configured GitRepos will go to, then just create a cluster group called default
+and add clusters to it.
\ No newline at end of file
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000000..963297efc9
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,14 @@
+# Introduction
+
+!!! hint "Status"
+ Fleet is currently alpha quality and actively being developed.
+
+Fleet is GitOps at scale. Fleet is designed to manage up to a million clusters. It's also lightweight
+enough that it works great for a [single cluster](./single-cluster-install.md) too, but it really shines
+when you get to a large scale. By large scale we mean either a lot of clusters, a lot of deployments, or a lot of
+teams in a single organization.
+
+Fleet can manage deployments from git of raw Kubernetes YAML, Helm charts, or Kustomize or any combination of the three.
+Regardless of the source all resources are dynamically turned into Helm charts and Helm is used as the engine to
+deploy everything in the cluster. This gives a high degree of control, consistency, and auditability. Fleet focuses not only on
+the ability to scale, but to give one a high degree of control and visibility to exactly what is installed on the cluster.
\ No newline at end of file
diff --git a/docs/install.md b/docs/install.md
deleted file mode 100644
index 583aa8660d..0000000000
--- a/docs/install.md
+++ /dev/null
@@ -1,141 +0,0 @@
-Architecture and Installation
-=============================
-
-## Architectures
-
-Fleet has two primary components. The fleet controller and the cluster agents.
-
-### Fleet Manager
-
-The fleet controller is a set of Kubernetes controllers running in any standard Kubernetes
-cluster. The only API exposed by the Fleet manages is the Kubernetes API, there is no
-custom API for the fleet controller.
-
-### Cluster Agents
-
-One cluster agent runs in each cluster and is responsible for talking to the fleet controller.
-The only communication from cluster to fleet controller is by this agent and all communication
-goes from the managed cluster to the fleet controller. The fleet controller does not
-reach out to the clusters. This means managed clusters can run in private networks and behind
-NAT. The only requirement is the cluster agent needs to be able to communicate with the
-Kubernetes API of the cluster running the fleet controller.
-
-The cluster agents are not assumed to have an "always on" connection. They will resume operation as
-soon as they can connect. Future enhancements will probably add the ability to schedule times of when
-the agent checks in.
-
-### Security
-
-fleet controller dynamically creates service account, manages their RBAC and then gives the
-tokens to the clusters. A cluster group can have a series of "Cluster Group Tokens" that
-are used to register clusters in to that group. The "cluster group token" is used only during the
-registration process to generate a credential specific to that cluster. After the cluster credential
-is established the cluster "forgets" the cluster group token. Cluster group tokens by default have a TTL
-of one week. That can be changed to shorter or to longer to forever.
-
-The service accounts given to the clusters only have privileges to list `BundleDeployment` in the namespace created
-specifically for that cluster. It can also update the `status` subresource of `BundleDeployment`
-
-### Scalability
-
-Fleet is designed to scale up to 1 million clusters. There are more details to come here on how we expect to scale
-a Kubernetes controller based architecture to 100's of millions of objects and beyond.
-
-## Installation
-
-### Manager installation
-
-The controller is just a deployment that runs in a Kubernetes cluster. It is assumed you already have a Kubernetes
-cluster available. The `fleet install manager` command is used to generate a manifest for installation.
-The `fleet install manager` command does not need a live connection to a Kubernetes cluster.
-
-```
-Generate deployment manifest to run the fleet controller
-
-Usage:
- fleet install manager [flags]
-
-Flags:
- --agent-image string Image to use for all agents
- --crds-only Output CustomResourceDefinitions only
- -h, --help help for controller
- --controller-image string Image to use for controller
- --system-namespace string Namespace that will be use in controller and agent cluster (default "fleet-system")
-
-Global Flags:
- -k, --kubeconfig string kubeconfig for authentication
- -n, --namespace string namespace (default "default")
-```
-
-Installation is accomplished typically by doing `fleet install manager | kubectl apply -f -`. The `agent-image` and
-`controller-image` fields are important if you wish to run Fleet from a private registry. The `system-namespace` is the
-namespace the fleet controller runs in and also the namespace the cluster agents will run in all clusters. This is
-by default `fleet-system` and it is recommended to keep the default value.
-
-### Cluster Registration
-
-The `fleet install agent-token` and `fleet install agent-config` commands are used to generate Kubernetes manifests to be
-used to register clusters. A cluster group token must be generated to register a cluster to the fleet controller.
-By default this token will expire in 1 week. That TTL can be changed. The cluster group token generated can be
-used over and over again while it's still valid to register new clusters. The `agent-config` command is used to generate
-configuration specific to a cluster that you may or may not want to share. The only functionality at the moment is
-to generate the config for a cluster so that on registration it will have specific labels.
-
-The `fleet install agent-token` command requires a live connection to the fleet controller. Your local `~/.kube/config` is
-used by default.
-
-To register a cluster first run `fleet install agent-token` to generate a new token.
-
-```
-Generate cluster group token and render manifest to register clusters into a specific cluster group
-
-Usage:
- fleet install agent-token [flags]
-
-Flags:
- -c, --ca-file string File containing optional CA cert for fleet controller cluster
- -g, --group string Cluster group to generate config for (default "default")
- -h, --help help for agent-token
- --no-ca
- --server-url string The full URL to the fleet controller cluster
- --system-namespace string System namespace of the controller (default "fleet-system")
- -t, --ttl string How long the generated registration token is valid, 0 means forever (default "1440m")
-
-Global Flags:
- -k, --kubeconfig string kubeconfig for authentication
- -n, --namespace string namespace (default "default")
-```
-
-The generated manifest will have information in it that is used to call back to the fleet controller. By default the
-URL and TLS configuration is taken from your kubeconfig. Use `--server-url` and `--ca-file` to override those parameters
-if they can't be properly derived.
-
-The output of `fleet install agent-token` should be saved to a file you can later apply to a cluster.
-
-```
-# Generate token, requires connect to fleet controller
-fleet --kubeconfig=fleet-controller-config install agent-token > token
-```
-
-If you want to have labels assigned to your cluster during registration this must be done before you apply the token to
-the cluster. The labels are only specified during registration and then after that the cluster can not change it's labels.
-The labels can only be changed in the fleet controller.
-
-To generate a configuration with labels run a command like below:
-
-```
-fleet install agent-config -l env=prod | kubectl --kubeconfig=cluster-kubeconfig apply -f -
-```
-
-Now that you have the custom config setup you can import the token so that the cluster registers
-
-```
-kubectl --kubeconfig=cluster-kubeconfig apply -f token
-```
-
-### Re-Register/Re-Install Agent
-
-If for any reason your cluster can not connect, you can always generate a new cluster group token and apply it to the
-cluster. It will then restart the registration process and generate a new credentials. The identity of the cluster is
-determined by the UUID of the `kube-system` namespace so it should reassociate to the cluster previously registered regardless
-of the name or labels of the cluster.
diff --git a/docs/installation.md b/docs/installation.md
new file mode 100644
index 0000000000..c9c10eab3e
--- /dev/null
+++ b/docs/installation.md
@@ -0,0 +1,9 @@
+# Installation
+
+The installation is broken up into two different use cases: [Single](./single-cluster-install.md) and
+[Multi-Cluster](./multi-cluster-install.md) install. The single cluster install is for when you wish to use GitOps to manage a single cluster,
+in which case you do not need a centralized manager cluster. In the multi-cluster use case
+you will set up a centralized manager cluster to which you can register clusters.
+
+If you are just learning Fleet, the single cluster install is the recommended starting
+point and you can move from single cluster to multi-cluster down the line.
\ No newline at end of file
diff --git a/docs/manager-initiated.md b/docs/manager-initiated.md
new file mode 100644
index 0000000000..52f3cd23a3
--- /dev/null
+++ b/docs/manager-initiated.md
@@ -0,0 +1,5 @@
+# Manager Initiated
+
+Refer to the [overview page](./cluster-overview.md#manager-initiated-registration) for background information on the manager initiated registration style.
+
+## TODO
diff --git a/docs/multi-cluster-install.md b/docs/multi-cluster-install.md
new file mode 100644
index 0000000000..8f77a5a277
--- /dev/null
+++ b/docs/multi-cluster-install.md
@@ -0,0 +1,143 @@
+# Multi-cluster Install
+![](./arch.png)
+
+In this use case you will set up a centralized Fleet manager. The centralized Fleet manager is a
+Kubernetes cluster running the Fleet controllers. After installing the Fleet manager you will then
+need to register remote downstream clusters with the Fleet manager.
+
+## Prerequisites
+
+### Helm 3
+
+Fleet is distributed as a Helm chart. Helm 3 is just a CLI and has no server side component so it's
+pretty straightforward. To install the Helm 3 CLI follow the
+[official install instructions](https://helm.sh/docs/intro/install/). The TL;DR is
+
+macOS
+```
+brew install helm
+```
+Windows
+```
+choco install kubernetes-helm
+```
+
+### Kubernetes
+
+The Fleet manager is a controller running on a Kubernetes cluster so an existing cluster is required. All
+downstream clusters that will be managed will need to communicate to this central Kubernetes cluster. This
+means the Kubernetes API server URL must be accessible to the downstream clusters. Any Kubernetes community
+supported version of Kubernetes will work, in practice this means 1.15 or greater.
+
+## API Server URL and CA certificate
+
+In order for your Fleet management installation to properly work it is important
+the correct API server URL and CA certificates are configured properly. The Fleet agents
+will communicate to the Kubernetes API server URL. This means the Kubernetes
+API server must be accessible to the downstream clusters. You will also need
+to obtain the CA certificate of the API server. The easiest way to obtain this information
+is typically from your kubeconfig file (`${HOME}/.kube/config`). The `server` and
+`certificate-authority` fields will have these values.
+
+```yaml
+apiVersion: v1
+clusters:
+- cluster:
+ certificate-authority-data: LS0tLS1CRUdJTi...
+ server: https://example.com:6443
+```
+
+Please note that the `certificate-authority-data` field is base64 encoded and will need to be
+decoded before you save it into a file. This can be done by saving the base64 encoded contents to
+a file and then run
+```shell
+base64 -d encoded-file > ca.pem
+```
+If you have `jq` and `base64` available then this one-liner will pull all CA certificates from your
+`${HOME}/.kube/config` and place them in a file named `ca.pem`.
+
+```shell
+kubectl config view -o json --raw | jq -r '.clusters[].cluster["certificate-authority-data"]' | base64 -d > ca.pem
+```
+
+## Install
+
+In the following example it will be assumed the API server URL is `https://example.com:6443`
+and the CA certificate is in the file `ca.pem`. If your API server URL is signed by a well known CA you can
+omit the `apiServerCA` parameter below or just create an empty `ca.pem` file (ie `touch ca.pem`).
+
+Run the following commands
+
+Setup the environment with your specific values.
+
+```shell
+API_SERVER_URL="https://example.com:6443"
+# Leave empty if your API server is signed by a well known CA
+API_SERVER_CA="ca.pem"
+```
+
+First validate the server URL is correct.
+
+```shell
+curl -fLk ${API_SERVER_URL}/version
+```
+
+The output of this command should be JSON with the version of the Kubernetes server or a `401 Unauthorized` error.
+If you do not get either of these results then please ensure you have the correct URL. The API server port is typically
+6443 for Kubernetes.
+
+Next validate that the CA certificate is proper by running the below command. If your API server is signed by a
+well known CA then omit the `--cacert ${API_SERVER_CA}` part of the command.
+
+```shell
+curl -fL --cacert ${API_SERVER_CA} ${API_SERVER_URL}/version
+```
+
+If you get a valid JSON response or a `401 Unauthorized` then it worked. The Unauthorized error is
+only because the curl command is not setting proper credentials, but this validates that the TLS
+connection works and the `ca.pem` is correct for this URL. If you get an `SSL certificate problem` then
+the `ca.pem` is not correct. The contents of the `${API_SERVER_CA}` file should look similar to the below
+
+```
+-----BEGIN CERTIFICATE-----
+MIIBVjCB/qADAgECAgEAMAoGCCqGSM49BAMCMCMxITAfBgNVBAMMGGszcy1zZXJ2
+ZXItY2FAMTU5ODM5MDQ0NzAeFw0yMDA4MjUyMTIwNDdaFw0zMDA4MjMyMTIwNDda
+MCMxITAfBgNVBAMMGGszcy1zZXJ2ZXItY2FAMTU5ODM5MDQ0NzBZMBMGByqGSM49
+AgEGCCqGSM49AwEHA0IABDXlQNkXnwUPdbSgGz5Rk6U9ldGFjF6y1YyF36cNGk4E
+0lMgNcVVD9gKuUSXEJk8tzHz3ra/+yTwSL5xQeLHBl+jIzAhMA4GA1UdDwEB/wQE
+AwICpDAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49BAMCA0cAMEQCIFMtZ5gGDoDs
+ciRyve+T4xbRNVHES39tjjup/LuN4tAgAiAteeB3jgpTMpZyZcOOHl9gpZ8PgEcN
+KDs/pb3fnMTtpA==
+-----END CERTIFICATE-----
+```
+
+Once you have validated the API server URL and API server CA parameters, install the following two
+Helm charts.
+
+First install the Fleet CustomResourceDefinitions.
+```shell
+helm -n fleet-system install --create-namespace --wait fleet-crd https://github.com/rancher/fleet/releases/download/{{fleet.version}}/fleet-crd-{{fleet.helmversion}}.tgz
+```
+
+Second install the Fleet controllers.
+```shell
+helm -n fleet-system install --create-namespace --wait \
+ --set apiServerURL="${API_SERVER_URL}" \
+ --set-file apiServerCA="${API_SERVER_CA}" \
+ fleet https://github.com/rancher/fleet/releases/download/{{fleet.version}}/fleet-{{fleet.helmversion}}.tgz
+```
+
+Fleet should be ready to use. You can check the status of the Fleet controller pods by running the below commands.
+
+```shell
+kubectl -n fleet-system logs -l app=fleet-controller
+kubectl -n fleet-system get pods -l app=fleet-controller
+```
+
+```
+NAME READY STATUS RESTARTS AGE
+fleet-controller-64f49d756b-n57wq 1/1 Running 0 3m21s
+```
+
+At this point the Fleet manager should be ready. You can now [register clusters](./cluster-overview.md) and [git repos](./gitrepo-add.md) with
+the Fleet manager.
\ No newline at end of file
diff --git a/docs/namespaces.md b/docs/namespaces.md
new file mode 100644
index 0000000000..5f28f06eaa
--- /dev/null
+++ b/docs/namespaces.md
@@ -0,0 +1,75 @@
+# Namespaces
+
+All types in the Fleet manager are namespaced. The namespaces of the manager types do not correspond to the namespaces
+of the deployed resources in the downstream cluster. Understanding how namespaces are used in the Fleet manager is
+important to understand the security model and how one can use Fleet in a multi-tenant fashion.
+
+## GitRepos, Bundles, Clusters, ClusterGroups
+
+The primary types are all scoped to a namespace. All selectors for `GitRepo` targets will be evaluated against
+the `Clusters` and `ClusterGroups` in the same namespaces. This means that if you give `create` or `update` privileges
+to the `GitRepo` type in a namespace, that end user can modify the selector to match any cluster in that namespace.
+This means in practice if you want to have two teams self manage their own `GitRepo` registrations but they should
+not be able to target each others clusters, they should be in different namespaces.
+
+## Special Namespaces
+
+### fleet-local
+
+The **fleet-local** namespace is a special namespace used for the single cluster use case or to bootstrap
+the configuration of the Fleet manager.
+
+When fleet is installed the `fleet-local` namespace is created along with one `Cluster` called `local` and one
+`ClusterGroup` called `default`. If no targets are specified on a `GitRepo`, they by default deploy to the
+`ClusterGroup` named `default`, if it exists. This means that all `GitRepos` created in `fleet-local` will
+automatically target the `local` `Cluster`. The `local` `Cluster` refers to the cluster the Fleet manager is running
+on.
+
+### fleet-system
+
+The Fleet controller and Fleet agent run in this namespace. All service accounts referenced by `GitRepos` are expected
+to live in this namespace in the downstream cluster.
+
+### Cluster namespaces
+
+For every cluster that is registered a namespace is created by the Fleet manager for that cluster.
+These namespaces are named in the form `cluster-${namespace}-${cluster}`. The purpose of this
+namespace is that all `BundleDeployments` for that cluster are put into this namespace and
+then the downstream cluster is given access to watch and update `BundleDeployments` in that namespace only.
+
+## Cross namespace deployments
+
+It is possible to create a GitRepo that will deploy across namespaces. The primary purpose of this is so that a
+central privileged team can manage common configuration for many clusters that are managed by different teams. The way
+this is accomplished is by creating a `BundleNamespaceMapping` resource in a namespace.
+
+If you are creating a `BundleNamespaceMapping` resource it is best to do it in a namespace that only contains `GitRepos`
+and no `Clusters`. It seems to get confusing if you have Clusters in the same namespace, as the cross namespace `GitRepos` will still
+always be evaluated against the current namespace. So if you have clusters in the same namespace you may wish to make them
+canary clusters.
+
+A `BundleNamespaceMapping` has only two fields. Which are as below
+
+```yaml
+kind: BundleNamespaceMapping
+apiVersion: fleet.cattle.io/v1alpha1
+metadata:
+ name: not-important
+ namespace: typically-unique
+
+# Bundles to match by label. The labels are defined in the fleet.yaml
+# labels field or from the GitRepo metadata.labels field
+bundleSelector:
+ matchLabels:
+ foo: bar
+
+# Namespaces to match by label
+namespaceSelector:
+ matchLabels:
+ foo: bar
+```
+
+If a `BundleNamespaceMapping`'s `bundleSelector` field matches a `Bundle`'s labels then that `Bundle`'s target criteria will
+be evaluated against all clusters in all namespaces that match `namespaceSelector`. One can specify labels for the created
+bundles from git by putting labels in the `fleet.yaml` file or on the `metadata.labels` field on the `GitRepo`.
+
diff --git a/docs/quickstart.md b/docs/quickstart.md
new file mode 100644
index 0000000000..4e87852a9b
--- /dev/null
+++ b/docs/quickstart.md
@@ -0,0 +1,63 @@
+# Quick Start
+
+Who needs documentation, let's just run this thing!
+
+## Install
+
+Get helm if you don't have it. Helm 3 is just a CLI and won't do bad insecure
+things to your cluster.
+
+```
+brew install helm
+```
+
+Install the Fleet Helm charts (there's two because we separate out CRDs for ultimate flexibility.)
+
+```shell
+helm -n fleet-system install --create-namespace --wait \
+ fleet-crd https://github.com/rancher/fleet/releases/download/{{fleet.version}}/fleet-crd-{{fleet.helmversion}}.tgz
+helm -n fleet-system install --create-namespace --wait \
+ fleet https://github.com/rancher/fleet/releases/download/{{fleet.version}}/fleet-{{fleet.helmversion}}.tgz
+```
+
+## Add a Git Repo to watch
+
+Change `spec.repo` to your git repo of choice. Kubernetes manifest files that should
+be deployed should be in `/manifests` in your repo.
+
+```bash
+cat > example.yaml << "EOF"
+apiVersion: fleet.cattle.io/v1alpha1
+kind: GitRepo
+metadata:
+ name: sample
+ # This namespace is special and auto-wired to deploy to the local cluster
+ namespace: fleet-local
+spec:
+  # Everything from this repo will be run in this cluster. You trust me, right?
+ repo: "https://github.com/rancher/fleet-examples"
+ bundleDirs: simple
+EOF
+
+kubectl apply -f example.yaml
+```
+
+## Get Status
+
+Get status of what fleet is doing
+
+```shell
+kubectl -n fleet-local get fleet
+```
+
+You should see something like this get created in your cluster.
+
+```
+kubectl get deploy frontend
+```
+```
+NAME READY UP-TO-DATE AVAILABLE AGE
+frontend 3/3 3 3 116m
+```
+
+Enjoy and read the [docs](https://rancher.github.io/fleet).
diff --git a/docs/single-cluster-install.md b/docs/single-cluster-install.md
new file mode 100644
index 0000000000..d02ae4f810
--- /dev/null
+++ b/docs/single-cluster-install.md
@@ -0,0 +1,62 @@
+# Single Cluster Install
+![](./single-cluster.png)
+
+In this use case you have only one cluster. The cluster will run both the Fleet
+manager and the Fleet agent. The cluster will communicate with Git server to
+deploy resources to this local cluster. This is the simplest setup and very
+useful for dev/test and small scale setups. This use case is supported as a valid
+use case for production at a smaller scale.
+
+## Prerequisites
+
+### Helm 3
+
+Fleet is distributed as a Helm chart. Helm 3 is just a CLI and has no server side component so it's
+pretty straightforward. To install the Helm 3 CLI follow the
+[official install instructions](https://helm.sh/docs/intro/install/). The TL;DR is
+
+macOS
+```
+brew install helm
+```
+Windows
+```
+choco install kubernetes-helm
+```
+
+### Kubernetes
+
+Fleet is a controller running on a Kubernetes cluster so an existing cluster is required. For the
+single cluster use case you would install Fleet to the cluster which you intend to manage with GitOps.
+Any Kubernetes community supported version of Kubernetes will work, in practice this means 1.15 or greater.
+
+## Install
+
+Install the following two Helm charts.
+
+First install the Fleet CustomResourceDefinitions.
+```shell
+helm -n fleet-system install --create-namespace --wait \
+ fleet-crd https://github.com/rancher/fleet/releases/download/{{fleet.version}}/fleet-crd-{{fleet.helmversion}}.tgz
+```
+
+Second install the Fleet controllers.
+```shell
+helm -n fleet-system install --create-namespace --wait \
+ fleet https://github.com/rancher/fleet/releases/download/{{fleet.version}}/fleet-{{fleet.helmversion}}.tgz
+```
+
+Fleet should be ready to use now for single cluster. You can check the status of the Fleet controller pods by
+running the below commands.
+
+```shell
+kubectl -n fleet-system logs -l app=fleet-controller
+kubectl -n fleet-system get pods -l app=fleet-controller
+```
+
+```
+NAME READY STATUS RESTARTS AGE
+fleet-controller-64f49d756b-n57wq 1/1 Running 0 3m21s
+```
+
+You can now [register some git repos](./gitrepo-add.md) in the `fleet-local` namespace to start deploying Kubernetes resources.
\ No newline at end of file
diff --git a/docs/single-cluster.png b/docs/single-cluster.png
new file mode 100644
index 0000000000..158c27c4bd
Binary files /dev/null and b/docs/single-cluster.png differ
diff --git a/docs/uninstall.md b/docs/uninstall.md
new file mode 100644
index 0000000000..30404ce4c5
--- /dev/null
+++ b/docs/uninstall.md
@@ -0,0 +1 @@
+TODO
\ No newline at end of file
diff --git a/examples/helm-download/bundle.yaml b/examples/helm-download/bundle.yaml
deleted file mode 100644
index 571954b2a5..0000000000
--- a/examples/helm-download/bundle.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-name: helm-download
-chart: https://charts.fluxcd.io/flux-1.2.0.tgz
-defaultNamespace: fluxcd
-
-overlays:
-- name: commonoptions
- values:
- image:
- pullPolicy: Always
-
-targets:
-- clusterSelector: {}
- overlays:
- - commonoptions
- values:
- image:
- repository: somemirror/fluxcd/flux
- tag: 1.18.0
diff --git a/examples/helm-download/log b/examples/helm-download/log
deleted file mode 100644
index b59861c542..0000000000
--- a/examples/helm-download/log
+++ /dev/null
@@ -1,492 +0,0 @@
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:10Z"
- generation: 1
- labels:
- name: import-two
- name: cluster-93d18642-217a-486b-9a5d-be06762443b2
- namespace: default-bobby-group
- resourceVersion: "9204"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-bobby-group/clusters/cluster-93d18642-217a-486b-9a5d-be06762443b2
- uid: 6664b0a6-d0a4-4cff-996c-7ed2d142f811
-spec: {}
-status:
- clusterGroupName: bobby
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T01:35:14-07:00"
- message: 'NotApplied: 1 (helm-download )'
- status: "False"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:10Z"
- status: "True"
- type: Processed
- namespace: default-bobby-group-cluster-93d18642-217a-486b-9a5d-be067-69cc4
- summary:
- desiredReady: 2
- nonReadyResources:
- - bundleState: Ready
- name: fleet-agent
- - bundleState: NotApplied
- name: helm-download
- notApplied: 1
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:01Z"
- generation: 1
- labels:
- name: import
- name: cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5
- namespace: default-bobby-group
- resourceVersion: "9205"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-bobby-group/clusters/cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5
- uid: 4df6492d-4130-4e48-906e-a46bebd10c3b
-spec: {}
-status:
- clusterGroupName: bobby
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T08:35:14Z"
- status: "True"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:01Z"
- status: "True"
- type: Processed
- namespace: default-bobby-group-cluster-d7b5d925-fc56-45ca-92d5-de98f-a1b21
- summary:
- desiredReady: 1
- nonReadyNames:
- fleet-agent:
- bundleState: Ready
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:12Z"
- generation: 1
- labels:
- name: import-three
- name: cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- namespace: default-othergroup-group
- resourceVersion: "9206"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-othergroup-group/clusters/cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- uid: 1ca4e2ab-d4ad-47ea-b7b1-db913b834a84
-spec: {}
-status:
- clusterGroupName: othergroup
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T08:35:14Z"
- status: "True"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:12Z"
- status: "True"
- type: Processed
- namespace: default-othergroup-group-cluster-f6a0e6da-ff49-4aab-9a21--f41c0
- summary:
- desiredReady: 1
- nonReadyNames:
- fleet-agent:
- bundleState: Ready
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:12Z"
- generation: 1
- labels:
- name: import-three
- name: cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- namespace: default-othergroup-group
- resourceVersion: "9207"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-othergroup-group/clusters/cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- uid: 1ca4e2ab-d4ad-47ea-b7b1-db913b834a84
-spec: {}
-status:
- clusterGroupName: othergroup
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T01:35:15-07:00"
- message: 'NotApplied: 1 (helm-download )'
- status: "False"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:12Z"
- status: "True"
- type: Processed
- namespace: default-othergroup-group-cluster-f6a0e6da-ff49-4aab-9a21--f41c0
- summary:
- desiredReady: 2
- nonReadyResources:
- - bundleState: Ready
- name: fleet-agent
- - bundleState: NotApplied
- name: helm-download
- notApplied: 1
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:10Z"
- generation: 1
- labels:
- name: import-two
- name: cluster-93d18642-217a-486b-9a5d-be06762443b2
- namespace: default-bobby-group
- resourceVersion: "9208"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-bobby-group/clusters/cluster-93d18642-217a-486b-9a5d-be06762443b2
- uid: 6664b0a6-d0a4-4cff-996c-7ed2d142f811
-spec: {}
-status:
- clusterGroupName: bobby
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T08:35:15Z"
- status: "True"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:10Z"
- status: "True"
- type: Processed
- namespace: default-bobby-group-cluster-93d18642-217a-486b-9a5d-be067-69cc4
- summary:
- desiredReady: 1
- nonReadyNames:
- fleet-agent:
- bundleState: Ready
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:01Z"
- generation: 1
- labels:
- name: import
- name: cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5
- namespace: default-bobby-group
- resourceVersion: "9209"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-bobby-group/clusters/cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5
- uid: 4df6492d-4130-4e48-906e-a46bebd10c3b
-spec: {}
-status:
- clusterGroupName: bobby
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T01:35:15-07:00"
- message: 'NotApplied: 1 (helm-download )'
- status: "False"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:01Z"
- status: "True"
- type: Processed
- namespace: default-bobby-group-cluster-d7b5d925-fc56-45ca-92d5-de98f-a1b21
- summary:
- desiredReady: 2
- nonReadyResources:
- - bundleState: Ready
- name: fleet-agent
- - bundleState: NotApplied
- name: helm-download
- notApplied: 1
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:12Z"
- generation: 1
- labels:
- name: import-three
- name: cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- namespace: default-othergroup-group
- resourceVersion: "9210"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-othergroup-group/clusters/cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- uid: 1ca4e2ab-d4ad-47ea-b7b1-db913b834a84
-spec: {}
-status:
- clusterGroupName: othergroup
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T08:35:15Z"
- status: "True"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:12Z"
- status: "True"
- type: Processed
- namespace: default-othergroup-group-cluster-f6a0e6da-ff49-4aab-9a21--f41c0
- summary:
- desiredReady: 1
- nonReadyNames:
- fleet-agent:
- bundleState: Ready
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:12Z"
- generation: 1
- labels:
- name: import-three
- name: cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- namespace: default-othergroup-group
- resourceVersion: "9212"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-othergroup-group/clusters/cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- uid: 1ca4e2ab-d4ad-47ea-b7b1-db913b834a84
-spec: {}
-status:
- clusterGroupName: othergroup
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T01:35:16-07:00"
- message: 'NotApplied: 1 (helm-download )'
- status: "False"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:12Z"
- status: "True"
- type: Processed
- namespace: default-othergroup-group-cluster-f6a0e6da-ff49-4aab-9a21--f41c0
- summary:
- desiredReady: 2
- nonReadyResources:
- - bundleState: Ready
- name: fleet-agent
- - bundleState: NotApplied
- name: helm-download
- notApplied: 1
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:10Z"
- generation: 1
- labels:
- name: import-two
- name: cluster-93d18642-217a-486b-9a5d-be06762443b2
- namespace: default-bobby-group
- resourceVersion: "9213"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-bobby-group/clusters/cluster-93d18642-217a-486b-9a5d-be06762443b2
- uid: 6664b0a6-d0a4-4cff-996c-7ed2d142f811
-spec: {}
-status:
- clusterGroupName: bobby
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T01:35:16-07:00"
- message: 'NotApplied: 1 (helm-download )'
- status: "False"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:10Z"
- status: "True"
- type: Processed
- namespace: default-bobby-group-cluster-93d18642-217a-486b-9a5d-be067-69cc4
- summary:
- desiredReady: 2
- nonReadyResources:
- - bundleState: Ready
- name: fleet-agent
- - bundleState: NotApplied
- name: helm-download
- notApplied: 1
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:01Z"
- generation: 1
- labels:
- name: import
- name: cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5
- namespace: default-bobby-group
- resourceVersion: "9214"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-bobby-group/clusters/cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5
- uid: 4df6492d-4130-4e48-906e-a46bebd10c3b
-spec: {}
-status:
- clusterGroupName: bobby
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T08:35:16Z"
- status: "True"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:01Z"
- status: "True"
- type: Processed
- namespace: default-bobby-group-cluster-d7b5d925-fc56-45ca-92d5-de98f-a1b21
- summary:
- desiredReady: 1
- nonReadyNames:
- fleet-agent:
- bundleState: Ready
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:12Z"
- generation: 1
- labels:
- name: import-three
- name: cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- namespace: default-othergroup-group
- resourceVersion: "9245"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-othergroup-group/clusters/cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- uid: 1ca4e2ab-d4ad-47ea-b7b1-db913b834a84
-spec: {}
-status:
- clusterGroupName: othergroup
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T08:35:16Z"
- status: "True"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:12Z"
- status: "True"
- type: Processed
- namespace: default-othergroup-group-cluster-f6a0e6da-ff49-4aab-9a21--f41c0
- summary:
- desiredReady: 1
- nonReadyNames:
- fleet-agent:
- bundleState: Ready
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:12Z"
- generation: 1
- labels:
- name: import-three
- name: cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- namespace: default-othergroup-group
- resourceVersion: "9248"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-othergroup-group/clusters/cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- uid: 1ca4e2ab-d4ad-47ea-b7b1-db913b834a84
-spec: {}
-status:
- clusterGroupName: othergroup
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T01:35:17-07:00"
- message: 'NotApplied: 1 (helm-download )'
- status: "False"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:12Z"
- status: "True"
- type: Processed
- namespace: default-othergroup-group-cluster-f6a0e6da-ff49-4aab-9a21--f41c0
- summary:
- desiredReady: 2
- nonReadyResources:
- - bundleState: Ready
- name: fleet-agent
- - bundleState: NotApplied
- name: helm-download
- notApplied: 1
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:10Z"
- generation: 1
- labels:
- name: import-two
- name: cluster-93d18642-217a-486b-9a5d-be06762443b2
- namespace: default-bobby-group
- resourceVersion: "9249"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-bobby-group/clusters/cluster-93d18642-217a-486b-9a5d-be06762443b2
- uid: 6664b0a6-d0a4-4cff-996c-7ed2d142f811
-spec: {}
-status:
- clusterGroupName: bobby
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T08:35:17Z"
- status: "True"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:10Z"
- status: "True"
- type: Processed
- namespace: default-bobby-group-cluster-93d18642-217a-486b-9a5d-be067-69cc4
- summary:
- desiredReady: 1
- nonReadyNames:
- fleet-agent:
- bundleState: Ready
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:01Z"
- generation: 1
- labels:
- name: import
- name: cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5
- namespace: default-bobby-group
- resourceVersion: "9250"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-bobby-group/clusters/cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5
- uid: 4df6492d-4130-4e48-906e-a46bebd10c3b
-spec: {}
-status:
- clusterGroupName: bobby
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T01:35:17-07:00"
- message: 'NotApplied: 1 (helm-download )'
- status: "False"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:01Z"
- status: "True"
- type: Processed
- namespace: default-bobby-group-cluster-d7b5d925-fc56-45ca-92d5-de98f-a1b21
- summary:
- desiredReady: 2
- nonReadyResources:
- - bundleState: Ready
- name: fleet-agent
- - bundleState: NotApplied
- name: helm-download
- notApplied: 1
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:12Z"
- generation: 1
- labels:
- name: import-three
- name: cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- namespace: default-othergroup-group
- resourceVersion: "9251"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-othergroup-group/clusters/cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- uid: 1ca4e2ab-d4ad-47ea-b7b1-db913b834a84
-spec: {}
-status:
- clusterGroupName: othergroup
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T08:35:17Z"
- status: "True"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:12Z"
- status: "True"
- type: Processed
- namespace: default-othergroup-group-cluster-f6a0e6da-ff49-4aab-9a21--f41c0
- summary:
- desiredReady: 1
- nonReadyNames:
- fleet-agent:
- bundleState: Ready
- ready: 1
diff --git a/examples/helm-download/log2 b/examples/helm-download/log2
deleted file mode 100644
index b59861c542..0000000000
--- a/examples/helm-download/log2
+++ /dev/null
@@ -1,492 +0,0 @@
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:10Z"
- generation: 1
- labels:
- name: import-two
- name: cluster-93d18642-217a-486b-9a5d-be06762443b2
- namespace: default-bobby-group
- resourceVersion: "9204"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-bobby-group/clusters/cluster-93d18642-217a-486b-9a5d-be06762443b2
- uid: 6664b0a6-d0a4-4cff-996c-7ed2d142f811
-spec: {}
-status:
- clusterGroupName: bobby
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T01:35:14-07:00"
- message: 'NotApplied: 1 (helm-download )'
- status: "False"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:10Z"
- status: "True"
- type: Processed
- namespace: default-bobby-group-cluster-93d18642-217a-486b-9a5d-be067-69cc4
- summary:
- desiredReady: 2
- nonReadyResources:
- - bundleState: Ready
- name: fleet-agent
- - bundleState: NotApplied
- name: helm-download
- notApplied: 1
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:01Z"
- generation: 1
- labels:
- name: import
- name: cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5
- namespace: default-bobby-group
- resourceVersion: "9205"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-bobby-group/clusters/cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5
- uid: 4df6492d-4130-4e48-906e-a46bebd10c3b
-spec: {}
-status:
- clusterGroupName: bobby
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T08:35:14Z"
- status: "True"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:01Z"
- status: "True"
- type: Processed
- namespace: default-bobby-group-cluster-d7b5d925-fc56-45ca-92d5-de98f-a1b21
- summary:
- desiredReady: 1
- nonReadyNames:
- fleet-agent:
- bundleState: Ready
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:12Z"
- generation: 1
- labels:
- name: import-three
- name: cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- namespace: default-othergroup-group
- resourceVersion: "9206"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-othergroup-group/clusters/cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- uid: 1ca4e2ab-d4ad-47ea-b7b1-db913b834a84
-spec: {}
-status:
- clusterGroupName: othergroup
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T08:35:14Z"
- status: "True"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:12Z"
- status: "True"
- type: Processed
- namespace: default-othergroup-group-cluster-f6a0e6da-ff49-4aab-9a21--f41c0
- summary:
- desiredReady: 1
- nonReadyNames:
- fleet-agent:
- bundleState: Ready
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:12Z"
- generation: 1
- labels:
- name: import-three
- name: cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- namespace: default-othergroup-group
- resourceVersion: "9207"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-othergroup-group/clusters/cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- uid: 1ca4e2ab-d4ad-47ea-b7b1-db913b834a84
-spec: {}
-status:
- clusterGroupName: othergroup
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T01:35:15-07:00"
- message: 'NotApplied: 1 (helm-download )'
- status: "False"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:12Z"
- status: "True"
- type: Processed
- namespace: default-othergroup-group-cluster-f6a0e6da-ff49-4aab-9a21--f41c0
- summary:
- desiredReady: 2
- nonReadyResources:
- - bundleState: Ready
- name: fleet-agent
- - bundleState: NotApplied
- name: helm-download
- notApplied: 1
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:10Z"
- generation: 1
- labels:
- name: import-two
- name: cluster-93d18642-217a-486b-9a5d-be06762443b2
- namespace: default-bobby-group
- resourceVersion: "9208"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-bobby-group/clusters/cluster-93d18642-217a-486b-9a5d-be06762443b2
- uid: 6664b0a6-d0a4-4cff-996c-7ed2d142f811
-spec: {}
-status:
- clusterGroupName: bobby
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T08:35:15Z"
- status: "True"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:10Z"
- status: "True"
- type: Processed
- namespace: default-bobby-group-cluster-93d18642-217a-486b-9a5d-be067-69cc4
- summary:
- desiredReady: 1
- nonReadyNames:
- fleet-agent:
- bundleState: Ready
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:01Z"
- generation: 1
- labels:
- name: import
- name: cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5
- namespace: default-bobby-group
- resourceVersion: "9209"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-bobby-group/clusters/cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5
- uid: 4df6492d-4130-4e48-906e-a46bebd10c3b
-spec: {}
-status:
- clusterGroupName: bobby
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T01:35:15-07:00"
- message: 'NotApplied: 1 (helm-download )'
- status: "False"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:01Z"
- status: "True"
- type: Processed
- namespace: default-bobby-group-cluster-d7b5d925-fc56-45ca-92d5-de98f-a1b21
- summary:
- desiredReady: 2
- nonReadyResources:
- - bundleState: Ready
- name: fleet-agent
- - bundleState: NotApplied
- name: helm-download
- notApplied: 1
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:12Z"
- generation: 1
- labels:
- name: import-three
- name: cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- namespace: default-othergroup-group
- resourceVersion: "9210"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-othergroup-group/clusters/cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- uid: 1ca4e2ab-d4ad-47ea-b7b1-db913b834a84
-spec: {}
-status:
- clusterGroupName: othergroup
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T08:35:15Z"
- status: "True"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:12Z"
- status: "True"
- type: Processed
- namespace: default-othergroup-group-cluster-f6a0e6da-ff49-4aab-9a21--f41c0
- summary:
- desiredReady: 1
- nonReadyNames:
- fleet-agent:
- bundleState: Ready
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:12Z"
- generation: 1
- labels:
- name: import-three
- name: cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- namespace: default-othergroup-group
- resourceVersion: "9212"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-othergroup-group/clusters/cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- uid: 1ca4e2ab-d4ad-47ea-b7b1-db913b834a84
-spec: {}
-status:
- clusterGroupName: othergroup
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T01:35:16-07:00"
- message: 'NotApplied: 1 (helm-download )'
- status: "False"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:12Z"
- status: "True"
- type: Processed
- namespace: default-othergroup-group-cluster-f6a0e6da-ff49-4aab-9a21--f41c0
- summary:
- desiredReady: 2
- nonReadyResources:
- - bundleState: Ready
- name: fleet-agent
- - bundleState: NotApplied
- name: helm-download
- notApplied: 1
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:10Z"
- generation: 1
- labels:
- name: import-two
- name: cluster-93d18642-217a-486b-9a5d-be06762443b2
- namespace: default-bobby-group
- resourceVersion: "9213"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-bobby-group/clusters/cluster-93d18642-217a-486b-9a5d-be06762443b2
- uid: 6664b0a6-d0a4-4cff-996c-7ed2d142f811
-spec: {}
-status:
- clusterGroupName: bobby
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T01:35:16-07:00"
- message: 'NotApplied: 1 (helm-download )'
- status: "False"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:10Z"
- status: "True"
- type: Processed
- namespace: default-bobby-group-cluster-93d18642-217a-486b-9a5d-be067-69cc4
- summary:
- desiredReady: 2
- nonReadyResources:
- - bundleState: Ready
- name: fleet-agent
- - bundleState: NotApplied
- name: helm-download
- notApplied: 1
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:01Z"
- generation: 1
- labels:
- name: import
- name: cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5
- namespace: default-bobby-group
- resourceVersion: "9214"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-bobby-group/clusters/cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5
- uid: 4df6492d-4130-4e48-906e-a46bebd10c3b
-spec: {}
-status:
- clusterGroupName: bobby
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T08:35:16Z"
- status: "True"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:01Z"
- status: "True"
- type: Processed
- namespace: default-bobby-group-cluster-d7b5d925-fc56-45ca-92d5-de98f-a1b21
- summary:
- desiredReady: 1
- nonReadyNames:
- fleet-agent:
- bundleState: Ready
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:12Z"
- generation: 1
- labels:
- name: import-three
- name: cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- namespace: default-othergroup-group
- resourceVersion: "9245"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-othergroup-group/clusters/cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- uid: 1ca4e2ab-d4ad-47ea-b7b1-db913b834a84
-spec: {}
-status:
- clusterGroupName: othergroup
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T08:35:16Z"
- status: "True"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:12Z"
- status: "True"
- type: Processed
- namespace: default-othergroup-group-cluster-f6a0e6da-ff49-4aab-9a21--f41c0
- summary:
- desiredReady: 1
- nonReadyNames:
- fleet-agent:
- bundleState: Ready
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:12Z"
- generation: 1
- labels:
- name: import-three
- name: cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- namespace: default-othergroup-group
- resourceVersion: "9248"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-othergroup-group/clusters/cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- uid: 1ca4e2ab-d4ad-47ea-b7b1-db913b834a84
-spec: {}
-status:
- clusterGroupName: othergroup
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T01:35:17-07:00"
- message: 'NotApplied: 1 (helm-download )'
- status: "False"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:12Z"
- status: "True"
- type: Processed
- namespace: default-othergroup-group-cluster-f6a0e6da-ff49-4aab-9a21--f41c0
- summary:
- desiredReady: 2
- nonReadyResources:
- - bundleState: Ready
- name: fleet-agent
- - bundleState: NotApplied
- name: helm-download
- notApplied: 1
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:10Z"
- generation: 1
- labels:
- name: import-two
- name: cluster-93d18642-217a-486b-9a5d-be06762443b2
- namespace: default-bobby-group
- resourceVersion: "9249"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-bobby-group/clusters/cluster-93d18642-217a-486b-9a5d-be06762443b2
- uid: 6664b0a6-d0a4-4cff-996c-7ed2d142f811
-spec: {}
-status:
- clusterGroupName: bobby
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T08:35:17Z"
- status: "True"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:10Z"
- status: "True"
- type: Processed
- namespace: default-bobby-group-cluster-93d18642-217a-486b-9a5d-be067-69cc4
- summary:
- desiredReady: 1
- nonReadyNames:
- fleet-agent:
- bundleState: Ready
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:01Z"
- generation: 1
- labels:
- name: import
- name: cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5
- namespace: default-bobby-group
- resourceVersion: "9250"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-bobby-group/clusters/cluster-d7b5d925-fc56-45ca-92d5-de98f6728dd5
- uid: 4df6492d-4130-4e48-906e-a46bebd10c3b
-spec: {}
-status:
- clusterGroupName: bobby
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T01:35:17-07:00"
- message: 'NotApplied: 1 (helm-download )'
- status: "False"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:01Z"
- status: "True"
- type: Processed
- namespace: default-bobby-group-cluster-d7b5d925-fc56-45ca-92d5-de98f-a1b21
- summary:
- desiredReady: 2
- nonReadyResources:
- - bundleState: Ready
- name: fleet-agent
- - bundleState: NotApplied
- name: helm-download
- notApplied: 1
- ready: 1
----
-apiVersion: fleet.cattle.io/v1alpha1
-kind: Cluster
-metadata:
- creationTimestamp: "2020-03-31T05:02:12Z"
- generation: 1
- labels:
- name: import-three
- name: cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- namespace: default-othergroup-group
- resourceVersion: "9251"
- selfLink: /apis/fleet.cattle.io/v1alpha1/namespaces/default-othergroup-group/clusters/cluster-f6a0e6da-ff49-4aab-9a21-fbe4687dd25b
- uid: 1ca4e2ab-d4ad-47ea-b7b1-db913b834a84
-spec: {}
-status:
- clusterGroupName: othergroup
- clusterGroupNamespace: default
- conditions:
- - lastUpdateTime: "2020-03-31T08:35:17Z"
- status: "True"
- type: Ready
- - lastUpdateTime: "2020-03-31T05:02:12Z"
- status: "True"
- type: Processed
- namespace: default-othergroup-group-cluster-f6a0e6da-ff49-4aab-9a21--f41c0
- summary:
- desiredReady: 1
- nonReadyNames:
- fleet-agent:
- bundleState: Ready
- ready: 1
diff --git a/examples/helm-kustomize/bundle.yaml b/examples/helm-kustomize/bundle.yaml
deleted file mode 100644
index fe52118d29..0000000000
--- a/examples/helm-kustomize/bundle.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-# Refer to other helm examples for general helm features
-name: helm-kustomize
-targets:
-- name: prod
- clusterSelector:
- matchLabels:
- env: prod
- # note that this directory is relate to ./kustomize
- kustomizeDir: production
-
-- name: staging
- clusterSelector:
- matchLabels:
- env: staging
- # note that this directory is relate to ./kustomize
- kustomizeDir: staging
-
-- name: dev
- clusterSelector:
- matchLabels:
- env: dev
- # note that this directory is relate to ./kustomize
- kustomizeDir: dev
-
-- name: default
- # Match everything
- clusterSelector: {}
diff --git a/examples/helm-kustomize/chart/Chart.yaml b/examples/helm-kustomize/chart/Chart.yaml
deleted file mode 100644
index 28224fdd8d..0000000000
--- a/examples/helm-kustomize/chart/Chart.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-name: simple
-version: v0.1
diff --git a/examples/helm-kustomize/chart/templates/pod.yaml b/examples/helm-kustomize/chart/templates/pod.yaml
deleted file mode 100644
index fe5ee411aa..0000000000
--- a/examples/helm-kustomize/chart/templates/pod.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
- name: myapp-pod
- labels:
- app: myapp
-spec:
- containers:
- - name: nginx
- image: nginx:1.7.9
diff --git a/examples/helm-kustomize/kustomize/dev/kustomization.yaml b/examples/helm-kustomize/kustomize/dev/kustomization.yaml
deleted file mode 100644
index e770515709..0000000000
--- a/examples/helm-kustomize/kustomize/dev/kustomization.yaml
+++ /dev/null
@@ -1 +0,0 @@
-namePrefix: dev-
diff --git a/examples/helm-kustomize/kustomize/production/kustomization.yaml b/examples/helm-kustomize/kustomize/production/kustomization.yaml
deleted file mode 100644
index c3d18807a1..0000000000
--- a/examples/helm-kustomize/kustomize/production/kustomization.yaml
+++ /dev/null
@@ -1 +0,0 @@
-namePrefix: prod-
diff --git a/examples/helm-kustomize/kustomize/staging/kustomization.yaml b/examples/helm-kustomize/kustomize/staging/kustomization.yaml
deleted file mode 100644
index 78fa08e78e..0000000000
--- a/examples/helm-kustomize/kustomize/staging/kustomization.yaml
+++ /dev/null
@@ -1 +0,0 @@
-namePrefix: staging-
diff --git a/examples/helm-local/bundle.yaml b/examples/helm-local/bundle.yaml
deleted file mode 100644
index 664ada3123..0000000000
--- a/examples/helm-local/bundle.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-name: helm
-defaultNamespace: fluxcd
-
-overlays:
-- name: commonoptions
- values:
- image:
- pullPolicy: Always
-
-targets:
-- clusterSelector: {}
- overlays:
- - commonoptions
- values:
- image:
- repository: somemirror/fluxcd/flux
- tag: 1.18.0
diff --git a/examples/helm-local/chart/.helmignore b/examples/helm-local/chart/.helmignore
deleted file mode 100755
index f0c1319444..0000000000
--- a/examples/helm-local/chart/.helmignore
+++ /dev/null
@@ -1,21 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
diff --git a/examples/helm-local/chart/CHANGELOG.md b/examples/helm-local/chart/CHANGELOG.md
deleted file mode 100755
index 5db4d9de82..0000000000
--- a/examples/helm-local/chart/CHANGELOG.md
+++ /dev/null
@@ -1,486 +0,0 @@
-## 1.2.0 (2020-02-06)
-
-### Improvements
-
- - Updated Flux to `1.18.0`
- [fluxcd/flux#2825](https://github.com/fluxcd/flux/pull/2825)
- - Add registry disable scanning to chart options
- [fluxcd/flux#2828](https://github.com/fluxcd/flux/pull/2828)
- - Add pod labels to chart options
- [fluxcd/flux#2775](https://github.com/fluxcd/flux/pull/2775)
- - Add sops decryption to chart options
- [fluxcd/flux#2762](https://github.com/fluxcd/flux/pull/2762)
-
-## 1.1.0 (2020-01-14)
-
-### Improvements
-
- - Updated Flux to `1.17.1`
- [fluxcd/flux#2738](https://github.com/fluxcd/flux/pull/2738)
- - Separate Git Poll Interval from Sync Interval
- [fluxcd/flux#2721](https://github.com/fluxcd/flux/pull/2721)
- - Namespace whitelisting in helm chart without clusterRole
- [fluxcd/flux#2719](https://github.com/fluxcd/flux/pull/2719)
- - Added hostAliases to deployment template
- [fluxcd/flux#2705](https://github.com/fluxcd/flux/pull/2705)
-
-## 1.0.0 (2019-12-16)
-
-**Note** The Helm Operator manifests have been **removed** from this chart.
-Please see the [install instruction](https://github.com/fluxcd/helm-operator/tree/master/chart/helm-operator)
-for Helm Operator v1.0.0. To keep using the same SSH key as Flux see the docs
-[here](https://github.com/fluxcd/helm-operator/tree/master/chart/helm-operator#use-fluxs-git-deploy-key).
-The upgrade procedure for `HelmReleases` from `v1beta1` to `v1` can be found
-[here](https://docs.fluxcd.io/projects/helm-operator/en/latest/guides/upgrading-to-ga.html).
-
-### Improvements
-
- - Updated Flux to `1.17.0`
- [fluxcd/flux#2693](https://github.com/fluxcd/flux/pull/2693)
- - Add a ServiceMonitor template
- [fluxcd/flux#2668](https://github.com/fluxcd/flux/pull/2668)
- - Update the automation interval flag in the chart
- [fluxcd/flux#2551](https://github.com/fluxcd/flux/pull/2551)
-
-## 0.16.0 (2019-11-28)
-
-### Improvements
-
- - Updated Flux to `1.16.0`
- [fluxcd/flux#2639](https://github.com/fluxcd/flux/pull/2639)
- - Allow `git.verifySignature` to be `"false"`
- [fluxcd/flux#2573](https://github.com/fluxcd/flux/pull/2573)
- - Update the automation interval flag in the chart
- [fluxcd/flux#2551](https://github.com/fluxcd/flux/pull/2551)
-
-### Bug fixes
-
- - Fix memcached PSP
- [fluxcd/flux#2542](https://github.com/fluxcd/flux/pull/2542)
-
-## 0.15.0 (2019-10-07)
-
-**Note** The Helm Operator options will be **removed** from this chart in the next major release.
-Please see the [install instruction](https://github.com/fluxcd/helm-operator/tree/master/chart/helm-operator)
-for Helm Operator v1.0.0. To keep using the same SSH key as Flux see the docs
-[here](https://github.com/fluxcd/helm-operator/tree/master/chart/helm-operator#use-fluxs-git-deploy-key).
-The upgrade procedure for `HelmReleases` from `v1beta1` to `v1` can be found
-[here](https://docs.fluxcd.io/projects/helm-operator/en/latest/guides/upgrading-to-ga.html).
-
-### Improvements
-
- - Updated Flux to `1.15.0`
- [fluxcd/flux#2490](https://github.com/fluxcd/flux/pull/2490)
- - Support secure Git over HTTPS using credentials from environment variables
- [fluxcd/flux#2470](https://github.com/fluxcd/flux/pull/2470)
- - Make sync operations timeout configurable with the `sync.timeout` option
- [fluxcd/flux#2481](https://github.com/fluxcd/flux/pull/2481)
-
-### Bug fixes
-
- - Mount AKS service principal through secret instead of hostPath for ACR support
- [fluxcd/flux#2437](https://github.com/fluxcd/flux/pull/2437)
- [fluxcd/flux#2434](https://github.com/fluxcd/flux/pull/2434)
-
-## 0.14.1 (2019-09-04)
-
-### Improvements
-
- - Updated Flux to `1.14.2`
- [fluxcd/flux#2419](https://github.com/fluxcd/flux/pull/2419)
-
-## 0.14.0 (2019-08-22)
-
-### Improvements
-
- - Updated Flux to `1.14.1`
- [fluxcd/flux#2401](https://github.com/fluxcd/flux/pull/2401)
- - Add the ability to disable memcached and set an external memcached service
- [fluxcd/flux#2393](https://github.com/fluxcd/flux/pull/2393)
-
-## 0.13.0 (2019-08-21)
-
-### Improvements
-
-**Note** The Flux chart is now hosted at `https://charts.fluxcd.io`
-
- - Updated Flux to `1.14.0`
- [fluxcd/flux#2380](https://github.com/fluxcd/flux/pull/2380)
- - Add `git.readonly` option to chart
- [fluxcd/flux#1807](https://github.com/fluxcd/flux/pull/1807)
- - Helm chart repository has been changed to `charts.fluxcd.io`
- [fluxcd/flux#2341](https://github.com/fluxcd/flux/pull/2341)
-
-## 0.12.0 (2019-08-08)
-
-### Improvements
-
- - Updated Flux to `1.13.3` and the Helm operator to `0.10.1`
- [fluxcd/flux#2296](https://github.com/fluxcd/flux/pull/2296)
- [fluxcd/flux#2318](https://github.com/fluxcd/flux/pull/2318)
- - Add manifest generation to helm chart
- [fluxcd/flux#2332](https://github.com/fluxcd/flux/pull/2332)
- [fluxcd/flux#2335](https://github.com/fluxcd/flux/pull/2335)
- - Let a named cluster role be used in chart
- [fluxcd/flux#2266](https://github.com/fluxcd/flux/pull/2266)
-
-## 0.11.0 (2019-07-10)
-
-### Improvements
-
- - Updated Flux to `1.13.2` and the Helm operator to `0.10.0`
- [fluxcd/flux#2235](https://github.com/fluxcd/flux/pull/2235)
- [fluxcd/flux#2237](https://github.com/fluxcd/flux/pull/2237)
- - Changed from DockerHub organization `weaveworks` -> `fluxcd`
- [fluxcd/flux#2224](https://github.com/fluxcd/flux/pull/2224)
- - Updated `HelmRelease` CRD to support rollbacks
- [fluxcd/flux#2006](https://github.com/fluxcd/flux/pull/2006)
- - Allow namespace scoping for both Flux and the Helm operator
- [fluxcd/flux#2206](https://github.com/fluxcd/flux/pull/2206)
- [fluxcd/flux#2209](https://github.com/fluxcd/flux/pull/2209)
- - Removed long deprecated `FluxHelmRelease` CRD and disabled CRD
- creation as the default to follow our own best practices
- [fluxcd/flux#2190](https://github.com/fluxcd/flux/pull/2190)
- - Enable `PodSecurityPolicy`
- [fluxcd/flux#2223](https://github.com/fluxcd/flux/pull/2223)
- [fluxcd/flux#2225](https://github.com/fluxcd/flux/pull/2225)
- - Support new Flux `--registry-use-labels` flag (`registry.useTimestampLabels`)
- [fluxcd/flux#2176](https://github.com/fluxcd/flux/pull/2176)
- - Support new Helm operator `--workers` flag (`helmOperator.workers`)
- [fluxcd/flux#2236](https://github.com/fluxcd/flux/pull/2236)
-
-## 0.10.2 (2019-06-27)
-
-### Improvements
-
- - Updated Flux to `1.13.1`
- [weaveworks/flux#2203](https://github.com/weaveworks/flux/pull/2203)
-
-## 0.10.1 (2019-06-16)
-
-### Bug fixes
-
- - Fix memcached security context
- [weaveworks/flux#2163](https://github.com/weaveworks/flux/pull/2163)
-
-## 0.10.0 (2019-06-14)
-
-### Improvements
-
- - Updated Flux to `1.13.0` and Helm operator to `0.9.2`
- [weaveworks/flux#2150](https://github.com/weaveworks/flux/pull/2150)
- [weaveworks/flux#2153](https://github.com/weaveworks/flux/pull/2153)
- - Updated memcached to `1.5.15` and configured default security context
- [weaveworks/flux#2107](https://github.com/weaveworks/flux/pull/2107)
- - Toggle garbage collection dry-run
- [weaveworks/flux#2063](https://github.com/weaveworks/flux/pull/2063)
- - Toggle git signature verification
- [weaveworks/flux#2053](https://github.com/weaveworks/flux/pull/2053)
- - Support `dnsPolicy` and `dnsConfig` in Flux daemon deployment
- [weaveworks/flux#2116](https://github.com/weaveworks/flux/pull/2116)
- - Support configurable log format
- [weaveworks/flux#2138](https://github.com/weaveworks/flux/pull/2138)
- - Support additional sidecar containers
- [weaveworks/flux#2130](https://github.com/weaveworks/flux/pull/2130)
-
-### Bug fixes
-
- - Fix `extraVolumes` indentation
- [weaveworks/flux#2102](https://github.com/weaveworks/flux/pull/2102)
-
-## 0.9.5 (2019-05-22)
-
- - Updated Flux to `1.12.3`
- [weaveworks/flux#2076](https://github.com/weaveworks/flux/pull/2076)
-
-## 0.9.4 (2019-05-09)
-
- - Updated Helm operator to `0.9.1`
- [weaveworks/flux#2032](https://github.com/weaveworks/flux/pull/2032)
-
-## 0.9.3 (2019-05-08)
-
-### Improvements
-
- - Updated Flux to `1.12.2` and Helm operator to `0.9.0`
- [weaveworks/flux#2025](https://github.com/weaveworks/flux/pull/2025)
- - Mount sub path of repositories secret
- [weaveworks/flux#2014](https://github.com/weaveworks/flux/pull/2014)
- - Toggle garbage collection
- [weaveworks/flux#2004](https://github.com/weaveworks/flux/pull/2004)
-
-## 0.9.2 (2019-04-29)
-
-### Improvements
-
- - Updated Flux to `1.12.1`
- [weaveworks/flux#1993](https://github.com/weaveworks/flux/pull/1993)
-
-## 0.9.1 (2019-04-17)
-
-### Improvements
-
- - Add the `status` subresource to HelmRelease CRD
- [weaveworks/flux#1906](https://github.com/weaveworks/flux/pull/1906)
- - Switch image registry from Quay to Docker Hub
- [weaveworks/flux#1949](https://github.com/weaveworks/flux/pull/1949)
-
-## 0.9.0 (2019-04-11)
-
-### Improvements
-
- - Updated Flux to `1.12.0` and Helm operator to `0.8.0`
- [weaveworks/flux#1924](https://github.com/weaveworks/flux/pull/1924)
- - Add ECR require option
- [weaveworks/flux#1863](https://github.com/weaveworks/flux/pull/1863)
- - Support loading values from alternative files in chart
- [weaveworks/flux#1909](https://github.com/weaveworks/flux/pull/1909)
- - Add Git poll interval option
- [weaveworks/flux#1910](https://github.com/weaveworks/flux/pull/1910)
- - Add init container, extra volumes and volume mounts
- [weaveworks/flux#1918](https://github.com/weaveworks/flux/pull/1918)
- - Add docker config file path option
- [weaveworks/flux#1919](https://github.com/weaveworks/flux/pull/1919)
-
-## 0.8.0 (2019-04-04)
-
-### Improvements
-
- - Updated Flux to `1.11.1`
- [weaveworks/flux#1892](https://github.com/weaveworks/flux/pull/1892)
- - Define custom Helm repositories in the Helm chart
- [weaveworks/flux#1893](https://github.com/weaveworks/flux/pull/1893)
- - Increase memcached max memory to 512MB
- [weaveworks/flux#1900](https://github.com/weaveworks/flux/pull/1900)
-
-## 0.7.0 (2019-03-27)
-
-### Improvements
-
- - Updated Flux to `1.11.0` and Helm operator to `0.7.1`
- [weaveworks/flux#1871](https://github.com/weaveworks/flux/pull/1871)
- - Allow mounting of docker credentials file
- [weaveworks/flux#1762](https://github.com/weaveworks/flux/pull/1762)
- - Increase memcached memory defaults
- [weaveworks/flux#1780](https://github.com/weaveworks/flux/pull/1780)
- - GPG Git commit signing
- [weaveworks/flux#1394](https://github.com/weaveworks/flux/pull/1394)
-
-## 0.6.3 (2019-02-14)
-
-### Improvements
-
- - Updated Flux to `1.10.1`
- [weaveworks/flux#1740](https://github.com/weaveworks/flux/pull/1740)
- - Add option to set pod annotations
- [weaveworks/flux#1737](https://github.com/weaveworks/flux/pull/1737)
-
-## 0.6.2 (2019-02-11)
-
-### Improvements
-
- - Allow chart images to be pulled from a private container registry
- [weaveworks/flux#1718](https://github.com/weaveworks/flux/pull/1718)
-
-### Bug fixes
-
- - Fix helm-op allow namespace flag mapping
- [weaveworks/flux#1724](https://github.com/weaveworks/flux/pull/1724)
-
-## 0.6.1 (2019-02-07)
-
-### Improvements
-
- - Updated Flux to `1.10.0` and Helm operator to `0.6.0`
- [weaveworks/flux#1713](https://github.com/weaveworks/flux/pull/1713)
- - Add option to exclude container images
- [weaveworks/flux#1659](https://github.com/weaveworks/flux/pull/1659)
- - Add option to mount custom `repositories.yaml`
- [weaveworks/flux#1671](https://github.com/weaveworks/flux/pull/1671)
- - Add option to limit the Helm operator to a single namespace
- [weaveworks/flux#1664](https://github.com/weaveworks/flux/pull/1664)
-
-### Bug fixes
-
- - Fix custom SSH secret mapping
- [weaveworks/flux#1710](https://github.com/weaveworks/flux/pull/1710)
-
-## 0.6.0 (2019-01-14)
-
-**Note** To fix the connectivity problems between Flux and memcached we've changed the
-memcached service from headless to ClusterIP. This change will make the Helm upgrade fail
-with `ClusterIP field is immutable`.
-
-Before upgrading to 0.6.0 you have to delete the memcached headless service:
-
-```bash
-kubectl -n flux delete svc flux-memcached
-```
-
-### Improvements
-
- - Updated Flux to `1.9.0` and Helm operator to `0.5.3`
- [weaveworks/flux#1662](https://github.com/weaveworks/flux/pull/1662)
- - Add resetValues field to HelmRelease CRD
- [weaveworks/flux#1628](https://github.com/weaveworks/flux/pull/1628)
- - Use ClusterIP service name for connecting to memcached
- [weaveworks/flux#1618](https://github.com/weaveworks/flux/pull/1618)
- - Increase comprehensiveness of values table in `chart/flux/README.md`
- [weaveworks/flux#1626](https://github.com/weaveworks/flux/pull/1626)
- - Rectify error where `resources` are not `None` by default in `chart/flux/values.yaml`
- - Add more fields that are actually in `chart/flux/values.yaml`
- - Separate `replicaCount` into a Flux one and `helmOperator.replicaCount` one
- - Only create the `flux-helm-tls-ca-config` file if `.Values.helmOperator.tls.caContent` exists.
- Useful when doing Flux upgrades but do not happen to know or want to specify
- the `caContent` in `values.yaml`. Otherwise, the existing caContent will be overriden with an
- empty value.
- [weaveworks/flux#1649](https://github.com/weaveworks/flux/pull/1649)
- - Add Flux AWS ECR flags
- [weaveworks/flux#1655](https://github.com/weaveworks/flux/pull/1655)
-
-
-## 0.5.2 (2018-12-20)
-
-### Improvements
-
- - Updated Flux to `v1.8.2` and Helm operator to `v0.5.2`
- [weaveworks/flux#1612](https://github.com/weaveworks/flux/pull/1612)
- - Parameterized the memcached image repo
- [weaveworks/flux#1592](https://github.com/weaveworks/flux/pull/1592)
- - Allow existing service account to be provided on helm install
- [weaveworks/flux#1589](https://github.com/weaveworks/flux/pull/1589)
- - Make SSH known hosts volume optional
- [weaveworks/flux#1544](https://github.com/weaveworks/flux/pull/1544)
-
-### Thanks
-
-Thanks to @davidkarlsen, @stephenmoloney, @batpok, @squaremo,
-@hiddeco and @stefanprodan for their contributions.
-
-## 0.5.1 (2018-11-21)
-
-### Bug fixes
-
- - Removed CRD hook from chart
- [weaveworks/flux#1536](https://github.com/weaveworks/flux/pull/1536)
-
-### Improvements
-
- - Updated Helm operator to `v0.5.1`
- [weaveworks/flux#1536](https://github.com/weaveworks/flux/pull/1536)
- - Updated chart README (removed Helm operator Git flags, fixed typos,
- updated example repo and use the same Git URL format everywhere)
- [weaveworks/flux#1527](https://github.com/weaveworks/flux/pull/1527)
-
-## 0.5.0 (2018-11-16)
-
-### Improvements
-
- - Updated Flux to `v1.8.1` and the Helm operator to `v0.5.0`
- [weaveworks/flux#1522](https://github.com/weaveworks/flux/pull/1522)
- - Adapted chart to new Helm operator CRD and args
- [weaveworks/flux#1382](https://github.com/weaveworks/flux/pull/1382)
-
-## 0.4.1 (2018-11-04)
-
-### Bug fixes
-
- - Fixed indentation of `.Values.helmOperator.tls.caContent`
- [weaveworks/flux#1484](https://github.com/weaveworks/flux/pull/1484)
-
-### Improvements
-
- - Updated Helm operator to `v0.4.0`
- [weaveworks/flux#1487](https://github.com/weaveworks/flux/pull/1487)
- - Added `--tiller-tls-hostname` Helm operator config flag to the chart
- [weaveworks/flux#1484](https://github.com/weaveworks/flux/pull/1484)
- - Include `valueFileSecrets` property in `helm-operator-crd.yaml`
- [weaveworks/flux#1468](https://github.com/weaveworks/flux/pull/1468)
- - Uniform language highlight on Helm chart README
- [weaveworks/flux#1464](https://github.com/weaveworks/flux/pull/1463)
-
-## 0.4.0 (2018-10-25)
-
-### Bug fixes
-
- - Made maximum memcache item size configurable, fixes
- `SERVER_ERROR object too large for cache` errors on large deployments
- [weaveworks/flux#1453](https://github.com/weaveworks/flux/pull/1453)
- - Fixed indentation of `aditionalArgs`
- [weaveworks/flux#1417](https://github.com/weaveworks/flux/pull/1417)
-
-### Improvements
-
- - Updated Flux to `v1.8.0` and the Helm operator to `0.3.0`
- [weaveworks/flux#1470](https://github.com/weaveworks/flux/pull/1470)
- - Deprecated Flux `--registry-cache-expiry` config flag
- [weaveworks/flux#1470](https://github.com/weaveworks/flux/pull/1470)
- - Added and documented multiple values (s.a. `nodeSelector`,
- `extraEnvs`, `git.timeout`)
- [weaveworks/flux#1469](https://github.com/weaveworks/flux/pull/1469)
- [weaveworks/flux#1446](https://github.com/weaveworks/flux/pull/1446)
- [weaveworks/flux#1416](https://github.com/weaveworks/flux/pull/1416)
- - Made it possible to enable Promotheus annotations
- [weaveworks/flux#1462](https://github.com/weaveworks/flux/pull/1462)
-
-## 0.3.4 (2018-09-28)
-
-### Improvements
-
- - Updated Flux to `v1.7.1`
- [weaveworks/flux#1405](https://github.com/weaveworks/flux/pull/1405)
- - Custom SSH keys for Flux and Helm operator are now allowed
- [weaveworks/flux#1391](https://github.com/weaveworks/flux/pull/1391)
-
-## 0.3.3 (2018-09-18)
-
-### Improvements
-
- - Updated Flux to `v1.7.0` and the Helm operator to `v0.2.1`
- [weaveworks/flux#1368](https://github.com/weaveworks/flux/pull/1368)
- - Added memcached verbose option
- [weaveworks/flux#1350](https://github.com/weaveworks/flux/pull/1350)
- - Allow overrides of `.kube/config`
- [weaveworks/flux#1342](https://github.com/weaveworks/flux/pull/1342)
- - Documentation improvements
- [weaveworks/flux#1357](https://github.com/weaveworks/flux/pull/1357)
-
-## 0.3.2 (2018-08-31)
-
-### Improvements
-
- - Updated Flux to `v1.6.0`
- [weaveworks/flux#1330](https://github.com/weaveworks/flux/pull/1330)
- - Made the Helm operator CRD creation optional
- [weaveworks/flux#1311](https://github.com/weaveworks/flux/pull/1311)
-
-## 0.3.0 (2018-08-24)
-
-### Improvements
-
- - Updated Helm operator to `v0.2.0`
- [weaveworks/flux#1308](https://github.com/weaveworks/flux/pull/1308)
- - Added Flux git label and registry options
- [weaveworks/flux#1305](https://github.com/weaveworks/flux/pull/1305)
- - Removed `.Values.git.gitPath` value
- [weaveworks/flux#1305](https://github.com/weaveworks/flux/pull/1305)
- - Documented how to use a private Git host
- [weaveworks/flux#1299](https://github.com/weaveworks/flux/pull/1299)
- - Added option to opt-in to logging of release diffs
- [weaveworks/flux#1271](https://github.com/weaveworks/flux/pull/1272)
-
-## 0.2.2 (2018-08-09)
-
-### Bug fixes
-
- - Fixed indentation of `.Values.ssh.known_hosts`
- [weaveworks/flux#1246](https://github.com/weaveworks/flux/pull/1246)
-
-### Improvements
-
- - Updated Flux to `v1.5.0`
- [weaveworks/flux#1279](https://github.com/weaveworks/flux/pull/1279)
- - Added openAPIV3Schema validation to Helm CRD
- [weaveworks/flux#1253](https://github.com/weaveworks/flux/pull/1253)
- - Fix markdown typo in README
- [weaveworks/flux#1248](https://github.com/weaveworks/flux/pull/1248)
diff --git a/examples/helm-local/chart/Chart.yaml b/examples/helm-local/chart/Chart.yaml
deleted file mode 100755
index fba8fedf37..0000000000
--- a/examples/helm-local/chart/Chart.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: v1
-appVersion: 1.18.0
-description: Flux is a tool that automatically ensures that the state of a cluster
- matches what is specified in version control
-engine: gotpl
-home: https://fluxcd.io
-icon: https://raw.githubusercontent.com/fluxcd/flux/master/docs/_files/weave-flux.png
-keywords:
-- gitops
-kubeVersion: '>=1.9.0-0'
-maintainers:
-- email: stefan@weave.works
- name: stefanprodan
-name: flux
-sources:
-- https://github.com/fluxcd/flux
-version: 1.2.0
diff --git a/examples/helm-local/chart/README.md b/examples/helm-local/chart/README.md
deleted file mode 100755
index 17403b0e8a..0000000000
--- a/examples/helm-local/chart/README.md
+++ /dev/null
@@ -1,294 +0,0 @@
-# Flux
-
-Flux is a tool that automatically ensures that the state of a cluster matches the config in git.
-It uses an operator in the cluster to trigger deployments inside Kubernetes, which means you don't need a separate CD tool.
-It monitors all relevant image repositories, detects new images, triggers deployments and updates the desired running
-configuration based on that (and a configurable policy).
-
-## Introduction
-
-This chart bootstraps a [Flux](https://github.com/fluxcd/flux) deployment on
-a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
-
-## Prerequisites
-
-Kubernetes >= v1.11
-
-# Git repo
-
- - One repo containing cluster config (i.e., Kubernetes YAMLs)
- - Example of a test repo: https://github.com/fluxcd/flux-get-started
-
-## Installation
-
-We put together a simple [Get Started
-tutorial](https://docs.fluxcd.io/en/stable/tutorials/get-started-helm.html) which takes about 5-10 minutes to follow.
-You will have a fully working Flux installation deploying workloads to your cluster.
-
-## Installing Flux using Helm
-
-The [configuration](#configuration) section lists all the parameters that can be configured during installation.
-
-### Installing the Chart
-
-Add the Flux repo:
-
-```sh
-helm repo add fluxcd https://charts.fluxcd.io
-```
-
-#### Install the chart with the release name `flux`
-
-1. Create the flux namespace:
-
- ```sh
- kubectl create namespace flux
- ```
-
-1. Replace `fluxcd/flux-get-started` with your own git repository and run helm install:
-
- ```sh
- helm upgrade -i flux fluxcd/flux \
- --set git.url=git@github.com:fluxcd/flux-get-started \
- --namespace flux
- ```
-
-1. Setup Git deploy
-
- > **Note:** this not required when [using git over HTTPS](#flux-with-git-over-https).
-
- At startup Flux generates a SSH key and logs the public key. Find the
- SSH public key by installing [fluxctl](https://docs.fluxcd.io/en/stable/references/fluxctl.html)
- and running:
-
- ```sh
- fluxctl identity --k8s-fwd-ns flux
- ```
-
- In order to sync your cluster state with GitHub you need to copy the
- public key and create a deploy key with access on your GitHub
- repository. Go to _Settings > Deploy keys_ click on _Add deploy key_,
- paste the Flux public key and click _Add key_. If you want Flux to
- have write access to your repo, check _Allow write access_; if you
- have set `git.readonly=true`, you can leave this box unchecked.
-
-#### Flux with git over HTTPS
-
-By setting the `env.secretName`, all key/value pairs in this secret will
-be defined in the Flux container as environment variables. This can be
-utilized in combination with Kubernetes feature of [using environment
-variables inside of your config](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#using-environment-variables-inside-of-your-config)
-to securely provide the HTTPS credentials which then can be used in the
-`git.url`.
-
-1. Create a personal access token to be used as the `GIT_AUTHKEY`:
-
- - [GitHub](https://help.github.com/en/articles/creating-a-personal-access-token-for-the-command-line)
- - [GitLab](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#creating-a-personal-access-token)
- - [BitBucket](https://confluence.atlassian.com/bitbucketserver/personal-access-tokens-939515499.html)
-
-1. Create a secret with your `GIT_AUTHUSER` (the username the token belongs
- to) and the `GIT_AUTHKEY` you created in the first step:
-
- ```sh
- kubectl create secret generic flux-git-auth --namespace flux --from-literal=GIT_AUTHUSER= --from-literal=GIT_AUTHKEY=
- ```
-
-1. Install Flux:
-
- ```sh
- helm upgrade -i flux fluxcd/flux \
- --set git.url='https://$(GIT_AUTHUSER):$(GIT_AUTHKEY)@github.com/fluxcd/flux-get-started.git' \
- --set env.secretName=flux-git-auth \
- --namespace flux
- ```
-
-#### Flux with a private git host
-
-When using a private git host, setting the `ssh.known_hosts` variable
-is required for enabling successful key matches because `StrictHostKeyChecking`
-is enabled during Flux git daemon operations.
-
-By setting the `ssh.known_hosts` variable, a configmap will be created
-called `flux-ssh-config` which in turn will be mounted into a volume named
-`sshdir` at `/root/.ssh/known_hosts`.
-
-1. Get the `ssh.known_hosts` keys by running the following command:
-
- ```sh
- ssh-keyscan
- ```
-
- To prevent a potential man-in-the-middle attack, one should
- verify the ssh keys acquired through the `ssh-keyscan` match expectations
- using an alternate mechanism.
-
-1. Install Flux:
-
- - Using a string for setting `known_hosts`
-
- ```sh
- YOUR_GIT_HOST=your_git_host.example.com
- YOUR_GIT_USER=your_git_user
- KNOWN_HOSTS='domain ssh-rsa line1
- domain ecdsa-sha2-line2
- domain ssh-ed25519 line3'
-
- helm upgrade -i flux fluxcd/flux \
- --set git.url="git@${YOUR_GIT_HOST}:${YOUR_GIT_USER}/flux-get-started" \
- --set-string ssh.known_hosts="${KNOWN_HOSTS}" \
- --namespace flux
- ```
-
- - Using a file for setting `known_hosts`
-
- Copy `known_hosts` keys into a temporary file `/tmp/flux_known_hosts`
-
- ```sh
- YOUR_GIT_HOST=your_git_host.example.com
- YOUR_GIT_USER=your_git_user
-
- helm upgrade -i flux fluxcd/flux \
- --set git.url="git@${YOUR_GIT_HOST}:${YOUR_GIT_USER}/flux-get-started" \
- --set-file ssh.known_hosts=/tmp/flux_known_hosts \
- --namespace flux
- ```
-
-#### Connect Flux to a Weave Cloud instance
-
-```sh
-helm upgrade -i flux fluxcd/flux \
---set git.url=git@github.com:fluxcd/flux-get-started \
---set token=YOUR_WEAVE_CLOUD_SERVICE_TOKEN \
---namespace flux
-```
-
-### Uninstalling the Chart
-
-To uninstall/delete the `flux` deployment:
-
-```sh
-helm delete flux
-```
-
-The command removes all the Kubernetes components associated with the chart and deletes the release.
-You should also remove the deploy key from your GitHub repository.
-
-### Configuration
-
-The following tables lists the configurable parameters of the Flux chart and their default values.
-
-| Parameter | Default | Description
-| ----------------------------------------------- | ---------------------------------------------------- | ---
-| `image.repository` | `docker.io/fluxcd/flux` | Image repository
-| `image.tag` | `` | Image tag
-| `replicaCount` | `1` | Number of Flux pods to deploy, more than one is not desirable.
-| `image.pullPolicy` | `IfNotPresent` | Image pull policy
-| `image.pullSecret` | `None` | Image pull secret
-| `logFormat` | `fmt` | Log format (fmt or json)
-| `resources.requests.cpu` | `50m` | CPU resource requests for the Flux deployment
-| `resources.requests.memory` | `64Mi` | Memory resource requests for the Flux deployment
-| `resources.limits` | `None` | CPU/memory resource limits for the Flux deployment
-| `nodeSelector` | `{}` | Node Selector properties for the Flux deployment
-| `tolerations` | `[]` | Tolerations properties for the Flux deployment
-| `affinity` | `{}` | Affinity properties for the Flux deployment
-| `extraVolumeMounts` | `[]` | Extra volumes mounts
-| `extraVolumes` | `[]` | Extra volumes
-| `dnsPolicy` | `` | Pod DNS policy
-| `dnsConfig` | `` | Pod DNS config
-| `token` | `None` | Weave Cloud service token
-| `extraEnvs` | `[]` | Extra environment variables for the Flux pod(s)
-| `env.secretName` | `` | Name of the secret that contains environment variables which should be defined in the Flux container (using `envFrom`)
-| `rbac.create` | `true` | If `true`, create and use RBAC resources
-| `rbac.pspEnabled` | `false` | If `true`, create and use a restricted pod security policy for Flux pod(s)
-| `allowedNamespaces` | `[]` | Allow flux to manage resources in the specified namespaces. The namespace flux is deployed in will always be included
-| `serviceAccount.create` | `true` | If `true`, create a new service account
-| `serviceAccount.name` | `flux` | Service account to be used
-| `serviceAccount.annotations` | `` | Additional Service Account annotations
-| `clusterRole.create` | `true` | If `false`, Flux will be restricted to the namespaces given in `allowedNamespaces` and the namespace where it is deployed
-| `service.type` | `ClusterIP` | Service type to be used (exposing the Flux API outside of the cluster is not advised)
-| `service.port` | `3030` | Service port to be used
-| `sync.state` | `git` | Where to keep sync state; either a tag in the upstream repo (`git`), or as an annotation on the SSH secret (`secret`)
-| `sync.timeout` | `None` | Duration after which sync operations time out (defaults to `1m`)
-| `sync.interval` | `` | Controls how often Flux will apply what’s in git, to the cluster, absent new commits (defaults to `git.pollInterval`)
-| `git.url` | `None` | URL of git repo with Kubernetes manifests
-| `git.readonly` | `false` | If `true`, the git repo will be considered read-only, Flux will not attempt to write to it
-| `git.branch` | `master` | Branch of git repo to use for Kubernetes manifests
-| `git.path` | `None` | Path within git repo to locate Kubernetes manifests (relative path)
-| `git.user` | `Weave Flux` | Username to use as git committer
-| `git.email` | `support@weave.works` | Email to use as git committer
-| `git.setAuthor` | `false` | If set, the author of git commits will reflect the user who initiated the commit and will differ from the git committer.
-| `git.signingKey` | `None` | If set, commits will be signed with this GPG key
-| `git.verifySignatures` | `false` | If set, the signatures of the sync tag and commits will be verified
-| `git.label` | `flux-sync` | Label to keep track of sync progress, used to tag the Git branch
-| `git.ciSkip` | `false` | Append "[ci skip]" to commit messages so that CI will skip builds
-| `git.pollInterval` | `5m` | Period at which to poll git repo for new commits
-| `git.timeout` | `20s` | Duration after which git operations time out
-| `git.secretName` | `None` | Kubernetes secret with the SSH private key. Superseded by `helmOperator.git.secretName` if set.
-| `git.config.enabled` | `false` | Mount `$HOME/.gitconfig` via Secret into the Flux and HelmOperator Pods, allowing for custom global Git configuration
-| `git.config.secretName` | `Computed` | Kubernetes secret with the global Git configuration
-| `git.config.data` | `None` | Global Git configuration per [git-config](https://git-scm.com/docs/git-config)
-| `podLabels` | `{}` | Additional labels for the Flux pod
-| `gpgKeys.secretName` | `None` | Kubernetes secret with GPG keys the Flux daemon should import
-| `gpgKeys.configMapName` | `None` | Kubernetes config map with public GPG keys the Flux daemon should import
-| `sops.enabled` | `false` | If `true` SOPS support will be enabled
-| `ssh.known_hosts` | `None` | The contents of an SSH `known_hosts` file, if you need to supply host key(s)
-| `registry.automationInterval` | `5m` | Period at which to check for updated images
-| `registry.rps` | `200` | Maximum registry requests per second per host
-| `registry.burst` | `125` | Maximum number of warmer connections to remote and memcache
-| `registry.trace` | `false` | Output trace of image registry requests to log
-| `registry.insecureHosts` | `None` | Use HTTP rather than HTTPS for the image registry domains
-| `registry.cacheExpiry` | `None` | Duration to keep cached image info (deprecated)
-| `registry.disableScanning` | `false` | Disable registry scanning completely. Flux will be deployed without memcached
-| `registry.excludeImage` | `None` | Do not scan images that match these glob expressions; if empty, 'k8s.gcr.io/*' images are excluded
-| `registry.useTimestampLabels` | `None` | Allow usage of (RFC3339) timestamp labels from (canonical) image refs that match these glob expressions; if empty, 'index.docker.io/{weaveworks,fluxcd}/*' images are allowed
-| `registry.ecr.region` | `None` | Restrict ECR scanning to these AWS regions; if empty, only the cluster's region will be scanned
-| `registry.ecr.includeId` | `None` | Restrict ECR scanning to these AWS account IDs; if empty, all account IDs that aren't excluded may be scanned
-| `registry.ecr.excludeId` | `602401143452` | Do not scan ECR for images in these AWS account IDs; the default is to exclude the EKS system account
-| `registry.ecr.require` | `false` | Refuse to start if the AWS API is not available
-| `registry.acr.enabled` | `false` | Mount `azure.json` via HostPath into the Flux Pod, enabling Flux to use AKS's service principal for ACR authentication
-| `registry.acr.hostPath` | `/etc/kubernetes/azure.json` | Alternative location of `azure.json` on the host
-| `registry.acr.secretName` | `None` | Secret to mount instead of a hostPath
-| `registry.dockercfg.enabled` | `false` | Mount `config.json` via Secret into the Flux Pod, enabling Flux to use a custom docker config file
-| `registry.dockercfg.secretName` | `None` | Kubernetes secret with the docker config.json
-| `registry.dockercfg.configFileName` | `/dockercfg/config.json` | Alternative path/name of the docker config.json
-| `memcached.enabled` | `true` | Create a memcached deployment and service. When set to `false` you must set an external memcached service.
-| `memcached.hostnameOverride` | `None` | Override the hostname to the memcached service. Useful when using memcached deployed separately from this chart.
-| `memcached.verbose` | `false` | Enable request logging in memcached
-| `memcached.maxItemSize` | `5m` | Maximum size for one item
-| `memcached.maxMemory` | `128` | Maximum memory to use, in megabytes
-| `memcached.pullSecret` | `None` | Image pull secret
-| `memcached.repository` | `memcached` | Image repository
-| `memcached.resources` | `None` | CPU/memory resource requests/limits for memcached
-| `memcached.securityContext` | [See values.yaml](/chart/flux/values.yaml#L176-L179) | Container security context for memcached
-| `memcached.nodeSelector` | `{}` | Node Selector properties for the memcached deployment
-| `memcached.tolerations` | `[]` | Tolerations properties for the memcached deployment
-| `kube.config` | [See values.yaml](/chart/flux/values.yaml#L200-L212) | Override for kubectl default config in the Flux pod(s).
-| `prometheus.enabled` | `false` | If enabled, adds prometheus annotations to Flux and helmOperator pod(s)
-| `prometheus.serviceMonitor.create` | `false` | Set to true if using the Prometheus Operator
-| `prometheus.serviceMonitor.interval` | `` | Interval at which metrics should be scraped
-| `prometheus.serviceMonitor.namespace` | `` | The namespace where the ServiceMonitor is deployed
-| `prometheus.serviceMonitor.additionalLabels` | `{}` | Additional labels to add to the ServiceMonitor
-| `syncGarbageCollection.enabled` | `false` | If enabled, fluxd will delete resources that it created, but are no longer present in git (see [garbage collection](/docs/references/garbagecollection.md))
-| `syncGarbageCollection.dry` | `false` | If enabled, fluxd won't delete any resources, but log the garbage collection output (see [garbage collection](/docs/references/garbagecollection.md))
-| `manifestGeneration` | `false` | If enabled, fluxd will look for `.flux.yaml` and run Kustomize or other manifest generators
-| `hostAliases` | `{}` | Additional hostAliases to add to the Flux pod(s). See
-
-Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example:
-
-```sh
-helm upgrade -i flux fluxcd/flux \
---set git.url=git@github.com:stefanprodan/k8s-podinfo \
---set git.path="deploy/auto-scaling\,deploy/local-storage" \
---namespace flux \
-fluxcd/flux
-```
-
-### Upgrade
-
-Update Flux version with:
-
-```sh
-helm upgrade --reuse-values flux fluxcd/flux \
---set image.tag=1.17.1
-```
diff --git a/examples/helm-local/chart/templates/NOTES.txt b/examples/helm-local/chart/templates/NOTES.txt
deleted file mode 100755
index 1ac035c49d..0000000000
--- a/examples/helm-local/chart/templates/NOTES.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-Get the Git deploy key by either (a) running
-
- kubectl -n {{ .Release.Namespace }} logs deployment/{{ .Release.Name }} | grep identity.pub | cut -d '"' -f2
-
-or by (b) installing fluxctl through
-https://docs.fluxcd.io/en/latest/references/fluxctl.html#installing-fluxctl
-and running:
-
- fluxctl identity --k8s-fwd-ns {{ .Release.Namespace }}
-
diff --git a/examples/helm-local/chart/templates/_helpers.tpl b/examples/helm-local/chart/templates/_helpers.tpl
deleted file mode 100755
index fb85aecf0c..0000000000
--- a/examples/helm-local/chart/templates/_helpers.tpl
+++ /dev/null
@@ -1,86 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "flux.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-If release name contains chart name it will be used as a full name.
-*/}}
-{{- define "flux.fullname" -}}
-{{- if .Values.fullnameOverride -}}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- $name := default .Chart.Name .Values.nameOverride -}}
-{{- if contains $name .Release.Name -}}
-{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "flux.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create the name of the service account to use
-*/}}
-{{- define "flux.serviceAccountName" -}}
-{{- if .Values.serviceAccount.create -}}
- {{ default (include "flux.fullname" .) .Values.serviceAccount.name }}
-{{- else -}}
- {{ default "default" .Values.serviceAccount.name }}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Create the name of the cluster role to use
-*/}}
-{{- define "flux.clusterRoleName" -}}
-{{- if .Values.clusterRole.create -}}
- {{ default (include "flux.fullname" .) .Values.clusterRole.name }}
-{{- else -}}
- {{ default "default" .Values.clusterRole.name }}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Create a custom repositories.yaml for Helm
-*/}}
-{{- define "flux.customRepositories" -}}
-apiVersion: v1
-generated: 0001-01-01T00:00:00Z
-repositories:
-{{- range .Values.helmOperator.configureRepositories.repositories }}
-- name: {{ required "Please specify a name for the Helm repo" .name }}
- url: {{ required "Please specify a URL for the Helm repo" .url }}
- cache: /var/fluxd/helm/repository/cache/{{ .name }}-index.yaml
- caFile: ""
- certFile: ""
- keyFile: ""
- password: "{{ .password | default "" }}"
- username: "{{ .username | default "" }}"
-{{- end }}
-{{- end -}}
-
-{{/*
-Create the name of the Git config Secret.
-*/}}
-{{- define "git.config.secretName" -}}
-{{- if .Values.git.config.enabled }}
- {{- if .Values.git.config.secretName -}}
- {{ default "default" .Values.git.config.secretName }}
- {{- else -}}
- {{ default (printf "%s-git-config" (include "flux.fullname" .)) }}
-{{- end -}}
-{{- end }}
-{{- end }}
diff --git a/examples/helm-local/chart/templates/deployment.yaml b/examples/helm-local/chart/templates/deployment.yaml
deleted file mode 100755
index e262e29169..0000000000
--- a/examples/helm-local/chart/templates/deployment.yaml
+++ /dev/null
@@ -1,303 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: {{ template "flux.fullname" . }}
- labels:
- app: {{ template "flux.name" . }}
- chart: {{ template "flux.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
-spec:
- replicas: {{ .Values.replicaCount }}
- selector:
- matchLabels:
- app: {{ template "flux.name" . }}
- release: {{ .Release.Name }}
- template:
- metadata:
- annotations:
- {{- if .Values.prometheus.enabled }}
- prometheus.io/scrape: "true"
- {{- end }}
- {{- if .Values.annotations }}
- {{- .Values.annotations | toYaml | trimSuffix "\n" | nindent 8 }}
- {{- end }}
- labels:
- app: {{ template "flux.name" . }}
- release: {{ .Release.Name }}
- {{- if .Values.podLabels }}
- {{- range $key, $value := .Values.podLabels }}
- {{ $key }}: {{ $value | quote }}
- {{- end }}
- {{- end }}
- spec:
- serviceAccountName: {{ template "flux.serviceAccountName" . }}
- {{- if .Values.image.pullSecret }}
- imagePullSecrets:
- - name: {{ .Values.image.pullSecret }}
- {{- end }}
- volumes:
- - name: kubedir
- configMap:
- name: {{ template "flux.fullname" . }}-kube-config
- {{- if .Values.ssh.known_hosts }}
- - name: sshdir
- configMap:
- name: {{ template "flux.fullname" . }}-ssh-config
- defaultMode: 0600
- {{- end }}
- {{- if .Values.git.config.enabled }}
- - name: git-config
- secret:
- secretName: {{ include "git.config.secretName" . }}
- defaultMode: 0400
- {{- end }}
- - name: git-key
- secret:
- {{- if .Values.git.secretName }}
- secretName: {{ .Values.git.secretName }}
- {{- else }}
- secretName: {{ template "flux.fullname" . }}-git-deploy
- {{- end }}
- defaultMode: 0400
- - name: git-keygen
- emptyDir:
- medium: Memory
- {{- if .Values.registry.acr.enabled }}
- - name: acr-credentials
- {{- if (not .Values.registry.acr.secretName) }}
- hostPath:
- path: "{{ .Values.registry.acr.hostPath }}"
- type: ""
- {{- else }}
- secret:
- secretName: {{ .Values.registry.acr.secretName }}
- {{- end }}
- {{- end }}
- {{- if .Values.registry.dockercfg.enabled }}
- - name: docker-credentials
- secret:
- secretName: "{{ .Values.registry.dockercfg.secretName }}"
- {{- end }}
- {{- if .Values.gpgKeys.secretName }}
- - name: gpg-keys
- secret:
- secretName: {{ .Values.gpgKeys.secretName }}
- defaultMode: 0400
- {{- end }}
- {{- if .Values.gpgKeys.configMapName }}
- - name: gpg-public-keys
- configMap:
- name: {{ .Values.gpgKeys.configMapName }}
- defaultMode: 0400
- {{- end }}
- {{- if .Values.extraVolumes }}
-{{ toYaml .Values.extraVolumes | indent 6 }}
- {{- end }}
-{{- if .Values.initContainers }}
- initContainers:
-{{- range $key, $value := .Values.initContainers }}
- - name: {{ $key }}
-{{ toYaml $value | indent 10 }}
-{{- end }}
-{{- end }}
- containers:
- - name: {{ .Chart.Name }}
- image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
- imagePullPolicy: {{ .Values.image.pullPolicy }}
- ports:
- - name: http
- containerPort: 3030
- protocol: TCP
- livenessProbe:
- httpGet:
- port: 3030
- path: /api/flux/v6/identity.pub
- initialDelaySeconds: 5
- timeoutSeconds: 5
- readinessProbe:
- httpGet:
- port: 3030
- path: /api/flux/v6/identity.pub
- initialDelaySeconds: 5
- timeoutSeconds: 5
- volumeMounts:
- - name: kubedir
- mountPath: /root/.kubectl
- {{- if .Values.ssh.known_hosts }}
- - name: sshdir
- mountPath: /root/.ssh
- readOnly: true
- {{- end }}
- {{- if .Values.git.config.enabled }}
- - name: git-config
- mountPath: /root/.gitconfig
- subPath: gitconfig
- readOnly: true
- {{- end }}
- - name: git-key
- mountPath: /etc/fluxd/ssh
- readOnly: true
- - name: git-keygen
- mountPath: /var/fluxd/keygen
- {{- if .Values.registry.acr.enabled }}
- - name: acr-credentials
- {{- if not .Values.registry.acr.secretName }}
- mountPath: /etc/kubernetes/azure.json
- {{- else }}
- mountPath: /etc/kubernetes/
- {{- end }}
- readOnly: true
- {{- end }}
- {{- if .Values.registry.dockercfg.enabled }}
- - name: docker-credentials
- mountPath: /dockercfg/
- readOnly: true
- {{- end }}
- {{- if .Values.gpgKeys.secretName }}
- - name: gpg-keys
- mountPath: /root/gpg-import/private
- readOnly: true
- {{- end }}
- {{- if .Values.gpgKeys.configMapName }}
- - name: gpg-public-keys
- mountPath: /root/gpg-import/public
- readOnly: true
- {{- end }}
- {{- if .Values.extraVolumeMounts }}
-{{ toYaml .Values.extraVolumeMounts | indent 10 }}
- {{- end }}
- env:
- - name: KUBECONFIG
- value: /root/.kubectl/config
- {{- if .Values.extraEnvs }}
-{{ toYaml .Values.extraEnvs | indent 10 }}
- {{- end }}
- {{- if .Values.env.secretName }}
- envFrom:
- - secretRef:
- name: {{ .Values.env.secretName }}
- {{- end }}
- args:
- {{- if not .Values.clusterRole.create }}
- - --k8s-allow-namespace={{ join "," (append .Values.allowedNamespaces .Release.Namespace) }}
- {{- end}}
- {{- if .Values.logFormat }}
- - --log-format={{ .Values.logFormat }}
- {{end}}
- - --ssh-keygen-dir=/var/fluxd/keygen
- - --k8s-secret-name={{ .Values.git.secretName | default (printf "%s-git-deploy" (include "flux.fullname" .)) }}
- - --memcached-hostname={{ .Values.memcached.hostnameOverride | default (printf "%s-memcached" (include "flux.fullname" .)) }}
- - --sync-state={{ .Values.sync.state }}
- {{- if .Values.sync.timeout }}
- - --sync-timeout={{ .Values.sync.timeout }}
- {{- end }}
- {{- if .Values.memcached.createClusterIP }}
- - --memcached-service=
- {{- end }}
- - --git-url={{ .Values.git.url }}
- - --git-branch={{ .Values.git.branch }}
- - --git-path={{ .Values.git.path }}
- - --git-readonly={{ .Values.git.readonly }}
- - --git-user={{ .Values.git.user }}
- - --git-email={{ .Values.git.email }}
- {{- if (and .Values.gpgKeys.secretName .Values.gpgKeys.configMapName) }}
- - --git-gpg-key-import=/root/gpg-import/private,/root/gpg-import/public
- {{- else if .Values.gpgKeys.secretName }}
- - --git-gpg-key-import=/root/gpg-import/private
- {{- else if .Values.gpgKeys.configMapName }}
- - --git-gpg-key-import=/root/gpg-import/public
- {{- end -}}
- {{- if .Values.git.signingKey }}
- - --git-signing-key={{ .Values.git.signingKey }}
- {{- end }}
- - --git-verify-signatures={{ .Values.git.verifySignatures }}
- - --git-set-author={{ .Values.git.setAuthor }}
- - --git-poll-interval={{ .Values.git.pollInterval }}
- - --git-timeout={{ .Values.git.timeout }}
- - --sync-interval={{ .Values.sync.interval | default .Values.git.pollInterval }}
- - --git-ci-skip={{ .Values.git.ciSkip }}
- {{- if .Values.git.label }}
- - --git-label={{ .Values.git.label }}
- {{- end }}
- {{- if .Values.sops.enabled }}
- - --sops={{ .Values.sops.enabled }}
- {{- end }}
- {{- if .Values.manifestGeneration }}
- - --manifest-generation=true
- {{- end }}
- - --automation-interval={{ .Values.registry.pollInterval | default .Values.registry.automationInterval }}
- - --registry-rps={{ .Values.registry.rps }}
- - --registry-burst={{ .Values.registry.burst }}
- - --registry-trace={{ .Values.registry.trace }}
- {{- if .Values.registry.ecr.require }}
- - --registry-require=ecr
- {{- end }}
- {{- if .Values.registry.insecureHosts }}
- - --registry-insecure-host={{ .Values.registry.insecureHosts }}
- {{- end }}
- {{- if .Values.registry.cacheExpiry }}
- - --registry-cache-expiry={{ .Values.registry.cacheExpiry }}
- {{- end }}
- {{- if .Values.registry.disableScanning }}
- - --registry-disable-scanning
- {{- end }}
- {{- if .Values.registry.excludeImage }}
- - --registry-exclude-image={{ .Values.registry.excludeImage }}
- {{- end }}
- {{- if .Values.registry.useTimestampLabels }}
- - --registry-use-labels={{ .Values.registry.useTimestampLabels }}
- {{- end }}
- {{- if .Values.registry.ecr.region }}
- - --registry-ecr-region={{ .Values.registry.ecr.region }}
- {{- end }}
- {{- if .Values.registry.ecr.includeId }}
- - --registry-ecr-include-id={{ .Values.registry.ecr.includeId }}
- {{- end }}
- {{- if .Values.registry.ecr.excludeId }}
- - --registry-ecr-exclude-id={{ .Values.registry.ecr.excludeId }}
- {{- end }}
- {{- if .Values.registry.dockercfg.enabled }}
- - --docker-config={{ .Values.registry.dockercfg.configFileName }}
- {{- end }}
- {{- if .Values.token }}
- - --connect=wss://cloud.weave.works/api/flux
- - --token={{ .Values.token }}
- {{- end }}
- {{- if and .Values.syncGarbageCollection.enabled (not .Values.syncGarbageCollection.dry) }}
- - --sync-garbage-collection={{ .Values.syncGarbageCollection.enabled }}
- {{- else if .Values.syncGarbageCollection.dry }}
- - --sync-garbage-collection-dry={{ .Values.syncGarbageCollection.dry }}
- {{- end }}
- {{- if .Values.additionalArgs }}
-{{ toYaml .Values.additionalArgs | indent 10 }}
- {{- end }}
- resources:
-{{ toYaml .Values.resources | indent 12 }}
-{{- if .Values.hostAliases }}
- hostAliases:
-{{ toYaml .Values.hostAliases |indent 8 }}
-{{- end }}
-{{- if .Values.extraContainers }}
-{{ toYaml .Values.extraContainers | indent 8}}
-{{- end }}
-{{- if .Values.dnsPolicy }}
- dnsPolicy: {{ .Values.dnsPolicy }}
-{{- end }}
-{{- if .Values.dnsConfig }}
- dnsConfig:
-{{ toYaml .Values.dnsConfig | indent 8 }}
-{{- end }}
- nodeSelector:
- beta.kubernetes.io/os: linux
- {{- with .Values.nodeSelector }}
-{{ toYaml . | indent 8 }}
- {{- end }}
- {{- with .Values.affinity }}
- affinity:
-{{ toYaml . | indent 8 }}
- {{- end }}
- {{- with .Values.tolerations }}
- tolerations:
-{{ toYaml . | indent 8 }}
- {{- end }}
diff --git a/examples/helm-local/chart/templates/gitconfig.yaml b/examples/helm-local/chart/templates/gitconfig.yaml
deleted file mode 100755
index a08bd22119..0000000000
--- a/examples/helm-local/chart/templates/gitconfig.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-{{- if .Values.git.config.enabled -}}
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ include "git.config.secretName" . }}
-type: Opaque
-data:
- gitconfig: {{ .Values.git.config.data | b64enc }}
-{{- end -}}
diff --git a/examples/helm-local/chart/templates/kube.yaml b/examples/helm-local/chart/templates/kube.yaml
deleted file mode 100755
index 53c79b40de..0000000000
--- a/examples/helm-local/chart/templates/kube.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ template "flux.fullname" . }}-kube-config
-data:
- config: |
- {{- if not .Values.clusterRole.create }}
- apiVersion: v1
- clusters: []
- contexts:
- - context:
- cluster: ""
- namespace: {{ .Release.Namespace }}
- user: ""
- name: default
- current-context: default
- kind: Config
- preferences: {}
- users: []
- {{- else if .Values.kube.config }}
- {{- if contains "\n" .Values.kube.config }}
- {{- range $value := .Values.kube.config | splitList "\n" }}
- {{ print $value }}
- {{- end }}
- {{- else }}
- {{ .Values.kube.config }}
- {{- end }}
- {{- end }}
\ No newline at end of file
diff --git a/examples/helm-local/chart/templates/memcached.yaml b/examples/helm-local/chart/templates/memcached.yaml
deleted file mode 100755
index 3cac2f71d3..0000000000
--- a/examples/helm-local/chart/templates/memcached.yaml
+++ /dev/null
@@ -1,91 +0,0 @@
-{{- if and (eq .Values.memcached.enabled true) (eq .Values.registry.disableScanning false) }}
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- name: {{ template "flux.fullname" . }}-memcached
- labels:
- app: {{ template "flux.name" . }}-memcached
- chart: {{ template "flux.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
-spec:
- replicas: 1
- strategy:
- type: Recreate
- selector:
- matchLabels:
- app: {{ template "flux.name" . }}-memcached
- release: {{ .Release.Name }}
- template:
- metadata:
- {{- if .Values.memcached.annotations }}
- annotations:
- {{- .Values.memcached.annotations | toYaml | trimSuffix "\n" | nindent 8 }}
- {{- end }}
- labels:
- app: {{ template "flux.name" . }}-memcached
- release: {{ .Release.Name }}
- spec:
- {{- if .Values.memcached.pullSecret }}
- imagePullSecrets:
- - name: {{ .Values.memcached.pullSecret }}
- {{- end }}
- {{- if .Values.rbac.pspEnabled }}
- serviceAccountName: {{ template "flux.serviceAccountName" . }}
- {{- end }}
- containers:
- - name: memcached
- image: {{ .Values.memcached.repository }}:{{ .Values.memcached.tag }}
- imagePullPolicy: IfNotPresent
- args:
- - -m {{ .Values.memcached.maxMemory }} # Maximum memory to use, in megabytes. 64MB is default.
- - -p 11211 # Default port, but being explicit is nice.
- - -I {{ .Values.memcached.maxItemSize }} # Maximum size for one item
- {{- if .Values.memcached.verbose }}
- - -vv # This gets us to the level of request logs.
- {{- end }}
- ports:
- - name: memcached
- containerPort: 11211
- resources:
-{{ toYaml .Values.memcached.resources | indent 10 }}
- {{- if .Values.memcached.addSecurityContext }}
- securityContext:
-{{ toYaml .Values.memcached.securityContext | indent 10 }}
- {{- end }}
- nodeSelector:
- beta.kubernetes.io/os: linux
- {{- with .Values.memcached.nodeSelector }}
-{{ toYaml . | indent 8 }}
- {{- end }}
- {{- with .Values.memcached.affinity }}
- affinity:
-{{ toYaml . | indent 8 }}
- {{- end }}
- {{- with .Values.memcached.tolerations }}
- tolerations:
-{{ toYaml . | indent 8 }}
- {{- end }}
----
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ template "flux.fullname" . }}-memcached
- labels:
- app: {{ template "flux.name" . }}-memcached
- chart: {{ template "flux.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
-spec:
- {{- if eq .Values.memcached.createClusterIP false }}
- clusterIP: None
- {{- end }}
- ports:
- - port: 11211
- targetPort: memcached
- protocol: TCP
- name: memcached
- selector:
- app: {{ template "flux.name" . }}-memcached
- release: {{ .Release.Name }}
-{{- end }}
diff --git a/examples/helm-local/chart/templates/psp.yaml b/examples/helm-local/chart/templates/psp.yaml
deleted file mode 100755
index eaa0c15b37..0000000000
--- a/examples/helm-local/chart/templates/psp.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-{{- if .Values.rbac.pspEnabled }}
-apiVersion: policy/v1beta1
-kind: PodSecurityPolicy
-metadata:
- name: {{ template "flux.fullname" . }}
- labels:
- app: {{ include "flux.name" . }}
- chart: {{ include "flux.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
- annotations:
- seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
-spec:
- privileged: false
- hostIPC: false
- hostNetwork: false
- hostPID: false
- readOnlyRootFilesystem: false
- allowPrivilegeEscalation: true
- allowedCapabilities:
- - '*'
- fsGroup:
- rule: RunAsAny
- runAsUser:
- rule: RunAsAny
- seLinux:
- rule: RunAsAny
- supplementalGroups:
- rule: RunAsAny
- volumes:
- - '*'
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: {{ template "flux.fullname" . }}-psp
- labels:
- app: {{ include "flux.name" . }}
- chart: {{ include "flux.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
-rules:
- - apiGroups: ['policy']
- resources: ['podsecuritypolicies']
- verbs: ['use']
- resourceNames:
- - {{ template "flux.fullname" . }}
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: {{ template "flux.fullname" . }}-psp
- labels:
- app: {{ include "flux.name" . }}
- chart: {{ include "flux.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: {{ template "flux.fullname" . }}-psp
-subjects:
- - kind: ServiceAccount
- name: {{ template "flux.serviceAccountName" . }}
- namespace: {{ .Release.Namespace }}
-{{- end }}
diff --git a/examples/helm-local/chart/templates/rbac-role.yaml b/examples/helm-local/chart/templates/rbac-role.yaml
deleted file mode 100755
index 00f33021ef..0000000000
--- a/examples/helm-local/chart/templates/rbac-role.yaml
+++ /dev/null
@@ -1,82 +0,0 @@
-{{- if and .Values.rbac.create (eq .Values.clusterRole.create false) -}}
-{{- range $namespace := (append .Values.allowedNamespaces .Release.Namespace) }}
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: Role
-metadata:
- name: {{ template "flux.fullname" $ }}
- namespace: {{ $namespace }}
- labels:
- app: {{ template "flux.name" $ }}
- chart: {{ template "flux.chart" $ }}
- release: {{ $.Release.Name }}
- heritage: {{ $.Release.Service }}
-rules:
- - apiGroups:
- - '*'
- resources:
- - '*'
- verbs:
- - '*'
----
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: RoleBinding
-metadata:
- name: {{ template "flux.fullname" $ }}
- namespace: {{ $namespace }}
- labels:
- app: {{ template "flux.name" $ }}
- chart: {{ template "flux.chart" $ }}
- release: {{ $.Release.Name }}
- heritage: {{ $.Release.Service }}
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: Role
- name: {{ template "flux.fullname" $ }}
-subjects:
- - name: {{ template "flux.serviceAccountName" $ }}
- namespace: {{ $.Release.Namespace | quote }}
- kind: ServiceAccount
----
-{{- end }}
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRole
-metadata:
- name: {{ template "flux.fullname" . }}-crd
- labels:
- app: {{ template "flux.name" . }}
- chart: {{ template "flux.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
-rules:
- - apiGroups:
- - apiextensions.k8s.io
- resources:
- - customresourcedefinitions
- verbs:
- - list
- - watch
- - apiGroups:
- - ""
- resources:
- - namespaces
- verbs:
- - list
----
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
-metadata:
- name: {{ template "flux.fullname" . }}
- labels:
- app: {{ template "flux.name" . }}
- chart: {{ template "flux.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: {{ template "flux.fullname" . }}-crd
-subjects:
- - name: {{ template "flux.serviceAccountName" . }}
- namespace: {{ .Release.Namespace | quote }}
- kind: ServiceAccount
-{{- end -}}
diff --git a/examples/helm-local/chart/templates/rbac.yaml b/examples/helm-local/chart/templates/rbac.yaml
deleted file mode 100755
index 3545dd6bb9..0000000000
--- a/examples/helm-local/chart/templates/rbac.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-{{- if .Values.rbac.create -}}
-{{if .Values.clusterRole.create -}}
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRole
-metadata:
- name: {{ template "flux.clusterRoleName" . }}
- labels:
- app: {{ template "flux.name" . }}
- chart: {{ template "flux.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
-rules:
- - apiGroups:
- - '*'
- resources:
- - '*'
- verbs:
- - '*'
- - nonResourceURLs:
- - '*'
- verbs:
- - '*'
-{{- end -}}
-{{- if or .Values.clusterRole.create .Values.clusterRole.name }}
----
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
-metadata:
- name: {{ template "flux.clusterRoleName" . }}
- labels:
- app: {{ template "flux.name" . }}
- chart: {{ template "flux.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
-roleRef:
- apiGroup: rbac.authorization.k8s.io
- kind: ClusterRole
- name: {{ template "flux.clusterRoleName" . }}
-subjects:
- - name: {{ template "flux.serviceAccountName" . }}
- namespace: {{ .Release.Namespace | quote }}
- kind: ServiceAccount
-{{- end -}}
-{{- end -}}
diff --git a/examples/helm-local/chart/templates/secret.yaml b/examples/helm-local/chart/templates/secret.yaml
deleted file mode 100755
index 958baff562..0000000000
--- a/examples/helm-local/chart/templates/secret.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-{{- if not .Values.git.secretName -}}
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ template "flux.fullname" . }}-git-deploy
-type: Opaque
-{{- end -}}
diff --git a/examples/helm-local/chart/templates/service.yaml b/examples/helm-local/chart/templates/service.yaml
deleted file mode 100755
index e4ce55619b..0000000000
--- a/examples/helm-local/chart/templates/service.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: {{ template "flux.fullname" . }}
- labels:
- app: {{ template "flux.name" . }}
- chart: {{ template "flux.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
-spec:
- type: {{ .Values.service.type }}
- ports:
- - port: {{ .Values.service.port }}
- targetPort: http
- protocol: TCP
- name: http
- selector:
- app: {{ template "flux.name" . }}
- release: {{ .Release.Name }}
diff --git a/examples/helm-local/chart/templates/serviceaccount.yaml b/examples/helm-local/chart/templates/serviceaccount.yaml
deleted file mode 100755
index 9a0c7c38a2..0000000000
--- a/examples/helm-local/chart/templates/serviceaccount.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-{{- if .Values.serviceAccount.create -}}
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{ template "flux.serviceAccountName" . }}
- labels:
- app: {{ template "flux.name" . }}
- chart: {{ template "flux.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
- {{- if .Values.serviceAccount.annotations }}
- annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
- {{- end }}
-{{- end -}}
diff --git a/examples/helm-local/chart/templates/servicemonitor.yaml b/examples/helm-local/chart/templates/servicemonitor.yaml
deleted file mode 100755
index e7ac5d585d..0000000000
--- a/examples/helm-local/chart/templates/servicemonitor.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-{{ if .Values.prometheus.serviceMonitor.create }}
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
- name: {{ template "flux.fullname" . }}
- labels:
- app: {{ template "flux.name" . }}
- chart: {{ template "flux.chart" . }}
- release: {{ .Release.Name }}
- heritage: {{ .Release.Service }}
- {{- range $key, $value := .Values.prometheus.serviceMonitor.additionalLabels }}
- {{ $key }}: {{ $value | quote }}
- {{- end }}
- {{- with .Values.prometheus.serviceMonitor.namespace }}
- namespace: {{ . }}
- {{- end }}
-spec:
- endpoints:
- - port: http
- honorLabels: true
- {{- with .Values.prometheus.serviceMonitor.interval }}
- interval: {{ . }}
- {{- end }}
- {{- with .Values.prometheus.serviceMonitor.scrapeTimeout }}
- scrapeTimeout: {{ . }}
- {{- end }}
- namespaceSelector:
- matchNames:
- - {{ .Release.Namespace }}
- selector:
- matchLabels:
- app: {{ template "flux.name" . }}
- release: {{ .Release.Name }}
-{{- end }}
diff --git a/examples/helm-local/chart/templates/ssh.yaml b/examples/helm-local/chart/templates/ssh.yaml
deleted file mode 100755
index 70b8d1bdf4..0000000000
--- a/examples/helm-local/chart/templates/ssh.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-{{- if .Values.ssh.known_hosts -}}
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: {{ template "flux.fullname" . }}-ssh-config
-data:
- known_hosts: |
- {{- if .Values.ssh.known_hosts }}
- {{- if contains "\n" .Values.ssh.known_hosts }}
- {{- range $value := .Values.ssh.known_hosts | splitList "\n" }}
- {{ print $value }}
- {{- end }}
- {{- else }}
- {{ .Values.ssh.known_hosts }}
- {{- end }}
- {{- end }}
-{{- end -}}
diff --git a/examples/helm-local/chart/values.yaml b/examples/helm-local/chart/values.yaml
deleted file mode 100755
index 5d5a88c275..0000000000
--- a/examples/helm-local/chart/values.yaml
+++ /dev/null
@@ -1,279 +0,0 @@
-# Default values for flux.
-
-# Weave Cloud service token
-token: ""
-
-replicaCount: 1
-
-logFormat: fmt
-
-image:
- repository: docker.io/fluxcd/flux
- tag: 1.18.0
- pullPolicy: IfNotPresent
- pullSecret:
-
-service:
- type: ClusterIP
- port: 3030
-
-# Specifies which namespaces flux should have access to
-allowedNamespaces: []
-
-rbac:
- # Specifies whether RBAC resources should be created
- create: true
- # Specifies whether PSP resources should be created
- pspEnabled: false
-
-serviceAccount:
- # Specifies whether a service account should be created
- create: true
- # The name of the service account to use.
- # If not set and create is true, a name is generated using the fullname template
- name:
- # Annotations for the Service Account
- annotations: {}
-
-# If create is `false` and no name is given, Flux will be restricted to
-# namespaces listed in allowedNamespaces and the namespace where it is
-# deployed, and the kubeconfig default context will be set to that namespace.
-clusterRole:
- create: true
- # The name of a cluster role to bind to; if not set and create is
- # true, a name based on fullname is generated
- name:
-
-resources:
- requests:
- cpu: 50m
- memory: 64Mi
-
-nodeSelector: {}
-
-annotations: {}
-
-podLabels: {}
-
-tolerations: []
-
-affinity: {}
-
-extraVolumeMounts: []
-
-extraVolumes: []
-
-# Optional DNS settings, configuring the ndots option may resolve
-# nslookup issues on some Kubernetes setups.
-# dnsPolicy: "None"
-# dnsConfig:
-# options:
-# - name: ndots
-# value: "1"
-
-gpgKeys:
- # These keys will be imported into GPG in the Flux container.
- secretName: ""
- # These keys will be imported into GPG in the Flux container.
- # NB: should only be used for public keys used to verify the
- # signatures of commits.
- configMapName: ""
-
-sync:
- # use `.sync.state: secret` to store flux's state as an annotation on the secret (instead of a git tag)
- state: git
- # Duration after which sync operations time out (defaults to 1m)
- timeout:
- # Controls how often Flux will apply what’s in git, to the cluster, absent new commits.
- # Reducing this interval below a minute or so may hinder Flux, since syncs can take tens of seconds,
- # leaving not much time to do other operations.
- # interval: "5m"
-
-git:
- # URL of git repo with Kubernetes manifests; e.g. git.url=ssh://git@github.com/fluxcd/flux-get-started
- url: ""
- # Branch of git repo to use for Kubernetes manifests
- branch: "master"
- # Path within git repo to locate Kubernetes manifests (relative path)
- path: ""
- # Set to `true` if you intend for Flux to not be able to push changes to git.
- # Also configure state.mode to `secret` since storing state in a git tag will no longer be possible.
- readonly: false
- # Username to use as git committer
- user: "Weave Flux"
- # Email to use as git committer
- email: "support@weave.works"
- # If set, commits will be signed with this GPG key.
- signingKey: ""
- # If set, the signatures of the sync tag and commits will be verified.
- verifySignatures: false
- # If set, the author of git commits will reflect the user who initiated the commit and will differ from the git committer.
- setAuthor: false
- # Label to keep track of sync progress
- label:
- # Append "[ci skip]" to commit messages so that CI will skip builds
- ciSkip: false
- # Period at which to poll git repo for new commits
- pollInterval: "5m"
- # Duration after which git operations time out
- timeout: "20s"
- # The secret name can be used to supply your own SSH key, instead of
- # relying on Flux to generate one for you:
- # 1. Generate a SSH key named identity: ssh-keygen -q -N "" -f ./identity
- # 2. Create a Kubernetes secret: kubectl -n flux create secret generic flux-ssh --from-file=./identity
- # 3. Delete the private key: rm ./identity
- # 4. Add ./identity.pub as a deployment key with write access in your Git repo
- # 5. Set the secret name (flux-ssh) below
- secretName: ""
- # Global Git configuration See https://git-scm.com/docs/git-config for more details.
- config:
- enabled: false
- secretName: ""
- data: ""
- # data: |
- # [credential "https://github.com"]
- # username = foo
-
-# If `true` SOPS support will be enabled
-sops:
- enabled: false
-
-registry:
- # Period at which to check for updated images
- automationInterval: "5m"
- # Maximum registry requests per second per host
- rps: 200
- # Maximum number of warmer connections to remote and memcache
- burst: 125
- # Output trace of image registry requests to log
- trace: false
- # Use HTTP rather than HTTPS for these image registry domains eg --set registry.insecureHosts="registry1.cluster.local\,registry2.cluster.local"
- insecureHosts:
- # Duration to keep cached image info. Must be < 1 month. (Deprecated)
- cacheExpiry:
- # Disable registry scanning completely
- disableScanning: false
- # Do not scan images that match these glob expressions
- excludeImage:
- # Allow usage of (RFC3339) timestamp labels from (canonical) image refs that match these glob expressions
- useTimestampLabels:
- # AWS ECR settings
- ecr:
- region:
- includeId:
- excludeId:
- require: false
- # Azure ACR settings
- acr:
- enabled: false
- hostPath: /etc/kubernetes/azure.json
- secretName: ""
- dockercfg:
- enabled: false
- secretName: ""
- configFileName: /dockercfg/config.json
-
-memcached:
- enabled: true
- hostnameOverride:
- repository: memcached
- tag: 1.5.20
- pullSecret:
- createClusterIP: true
- verbose: false
- maxItemSize: 5m
- maxMemory: 512
- addSecurityContext: true
- securityContext:
- runAsUser: 11211
- runAsGroup: 11211
- allowPrivilegeEscalation: false
- nodeSelector: {}
- tolerations: []
- affinity: {}
- resources: {}
- # If you do want to specify resources, uncomment the following
- # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
- # limits:
- # cpu: 100m
- # memory: 628Mi
- # requests:
- # cpu: 50m
- # memory: 512Mi
-
-ssh:
- # Overrides for git over SSH. If you use your own git server, you
- # will likely need to provide a host key for it in this field.
- known_hosts: ""
-
-kube:
- # Override for kubectl default config
- config: |
- apiVersion: v1
- clusters: []
- contexts:
- - context:
- cluster: ""
- namespace: default
- user: ""
- name: default
- current-context: default
- kind: Config
- preferences: {}
- users: []
-# For https://github.com/justinbarrick/fluxcloud/
-# additionalArgs:
-# - --connect=ws://fluxcloud
-
-# The contents of the secret will be defined as environment variables
-# in the Flux container. Once defined, you can use the variables in your
-# `git.url`: `https://$(GIT_AUTHUSER):$(GIT_AUTHKEY)@github.com/fluxcd/flux-get-started.git`
-env:
- secretName: ""
-
-# Additional environment variables to set
-extraEnvs: []
-# extraEnvs:
-# - name: FOO
-# value: bar
-
-prometheus:
- enabled: false
- serviceMonitor:
- # Enables ServiceMonitor creation for the Prometheus Operator
- create: false
- interval:
- scrapeTimeout:
- namespace:
- additionalLabels: {}
-
-syncGarbageCollection:
- enabled: false
- dry: false
-
-# Enables manifest generation
-manifestGeneration: false
-
-# Add your own init container or uncomment and modify the given example.
-initContainers: {}
-# flux-init: # <- will be used as container name
-# image: "busybox:1.30.1"
-# imagePullPolicy: "IfNotPresent"
-# command: ['sh', '-c', 'counter=0; until [ "$counter" -ge 30 ]; do if [ -f /tmp/flux-deploy-key/identity ]; then exit 0; else echo waiting for flux deploy key && sleep 1 && counter=$((counter+1)); fi; done; exit 1;']
-# volumeMounts:
-# - mountPath: /tmp/flux-deploy-key
-# name: flux-deploy-key
-
-# Additional containers to be added to the flux pod.
-extraContainers: []
-
-# Host aliases to be added to the Flux pod - see < - ip:
-hostAliases: {}
-# - ip: "127.0.0.1"
-# hostnames:
-# - "foo.local"
-# - "bar.local"
-# - ip: "10.1.2.3"
-# hostnames:
-# - "foo.remote"
-# - "bar.remote"
diff --git a/examples/kustomize/bundle.yaml b/examples/kustomize/bundle.yaml
deleted file mode 100644
index ad94e1b2f8..0000000000
--- a/examples/kustomize/bundle.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: kustomize
-targets:
-- name: prod
- clusterSelector:
- matchLabels:
- env: prod
- # note that this directory is relate to ./kustomize
- kustomizeDir: production
-
-- name: staging
- clusterSelector:
- matchLabels:
- env: staging
- # note that this directory is relate to ./kustomize
- kustomizeDir: staging
-
-- name: dev
- clusterSelector:
- matchLabels:
- env: dev
- # note that this directory is relate to ./kustomize
- kustomizeDir: dev
-
-- name: default
- # Match everything
- clusterSelector: {}
diff --git a/examples/kustomize/kustomize/base/kustomization.yaml b/examples/kustomize/kustomize/base/kustomization.yaml
deleted file mode 100644
index d577d5e5f9..0000000000
--- a/examples/kustomize/kustomize/base/kustomization.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-resources:
-- pod.yaml
\ No newline at end of file
diff --git a/examples/kustomize/kustomize/base/pod.yaml b/examples/kustomize/kustomize/base/pod.yaml
deleted file mode 100644
index fe5ee411aa..0000000000
--- a/examples/kustomize/kustomize/base/pod.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
- name: myapp-pod
- labels:
- app: myapp
-spec:
- containers:
- - name: nginx
- image: nginx:1.7.9
diff --git a/examples/kustomize/kustomize/dev/kustomization.yaml b/examples/kustomize/kustomize/dev/kustomization.yaml
deleted file mode 100644
index 4f7238aebe..0000000000
--- a/examples/kustomize/kustomize/dev/kustomization.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-resources:
-- ../base
-namePrefix: dev-
diff --git a/examples/kustomize/kustomize/kustomization.yaml b/examples/kustomize/kustomize/kustomization.yaml
deleted file mode 100644
index a721370eb1..0000000000
--- a/examples/kustomize/kustomize/kustomization.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-resources:
-- dev
-- staging
-- production
-namePrefix: cluster-a-
diff --git a/examples/kustomize/kustomize/production/kustomization.yaml b/examples/kustomize/kustomize/production/kustomization.yaml
deleted file mode 100644
index 4468dde453..0000000000
--- a/examples/kustomize/kustomize/production/kustomization.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-resources:
-- ../base
-namePrefix: prod-
diff --git a/examples/kustomize/kustomize/staging/kustomization.yaml b/examples/kustomize/kustomize/staging/kustomization.yaml
deleted file mode 100644
index 1e6ce66c27..0000000000
--- a/examples/kustomize/kustomize/staging/kustomization.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-resources:
-- ../base
-namePrefix: staging-
diff --git a/examples/yaml/bundle.yaml b/examples/yaml/bundle.yaml
deleted file mode 100644
index decafc1d0e..0000000000
--- a/examples/yaml/bundle.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-name: yaml
-defaultNamespace: nondefault
-
-targets:
-- name: merge
- overlays:
- - newfile-example
- - mergefile-example
- clusterSelector:
- matchLabels:
- region: one
-
-- name: changenamespace
- defaultNamespace: default
-
-- name: patch
- overlays:
- - patchfile-example
- clusterSelector:
- matchLabels:
- patch: "true"
-
-- name: default
- overlays:
- - newfile-example
- # {} will match everything
- clusterSelector: {}
diff --git a/examples/yaml/manifests/nginx.yaml b/examples/yaml/manifests/nginx.yaml
deleted file mode 100644
index 2e5256d17c..0000000000
--- a/examples/yaml/manifests/nginx.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
- creationTimestamp: null
- labels:
- run: webserver
- name: webserver
-spec:
- replicas: 1
- selector:
- matchLabels:
- run: webserver
- template:
- metadata:
- labels:
- run: webserver
- spec:
- containers:
- - image: nginx
- name: webserver
diff --git a/examples/yaml/overlays/mergefile-example/manifests/nginx_patch.yaml b/examples/yaml/overlays/mergefile-example/manifests/nginx_patch.yaml
deleted file mode 100644
index 9ed808b8c1..0000000000
--- a/examples/yaml/overlays/mergefile-example/manifests/nginx_patch.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-spec:
- template:
- spec:
- containers:
- - image: nginx:latest
- name: webserver
diff --git a/examples/yaml/overlays/newfile-example/manifests/svc.yaml b/examples/yaml/overlays/newfile-example/manifests/svc.yaml
deleted file mode 100644
index 3bc45f34ce..0000000000
--- a/examples/yaml/overlays/newfile-example/manifests/svc.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- name: webserver
-spec:
- ports:
- - port: 80
- targetPort: 80
- selector:
- run: webserver
diff --git a/examples/yaml/overlays/patchfile-example/manifests/nginx_patch.yaml b/examples/yaml/overlays/patchfile-example/manifests/nginx_patch.yaml
deleted file mode 100644
index b237b04b8e..0000000000
--- a/examples/yaml/overlays/patchfile-example/manifests/nginx_patch.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-# For a json patch it can be in json or yaml syntax
-- op: replace
- path: /spec/replicas
- value: 0
diff --git a/install.sh b/install.sh
deleted file mode 100644
index 447d04c64b..0000000000
--- a/install.sh
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/bin/sh
-set -e
-
-GITHUB_URL=https://github.com/rancher/fleet/releases
-
-SHA="sha256sum"
-if [ "$(uname)" = "Darwin" ]; then
- SHA="shasum -a 256"
-fi
-
-# --- helper functions for logs ---
-info()
-{
- echo "[INFO] " "$@"
-}
-fatal()
-{
- echo "[ERROR] " "$@"
- exit 1
-}
-
-
-# --- define needed environment variables ---
-setup_env() {
- # --- use sudo if we are not already root ---
- SUDO=sudo
- if [ -n "${SKIP_SUDO}" ] || [ "$(id -u)" = 0 ]; then
- SUDO=
- fi
-
- # --- use binary install directory if defined or create default ---
- if [ -n "${INSTALL_FLEET_BIN_DIR}" ]; then
- BIN_DIR="${INSTALL_FLEET_BIN_DIR}"
- else
- BIN_DIR="/usr/local/bin"
- fi
-
- # --- get hash of config & exec for currently installed fleet ---
- PRE_INSTALL_HASHES=$(get_installed_hashes)
- export PRE_INSTALL_HASHES
-}
-
-# --- set arch and suffix, fatal if architecture not supported ---
-setup_verify_arch() {
- OS=$(echo "$(uname)"|tr '[:upper:]' '[:lower:]')
-
- case "$OS" in
- # Minimalist GNU for Windows
- mingw*) OS='windows';;
- linux)
- ;;
- windows)
- ;;
- darwin)
- ;;
- *)
- fatal "Unsupported OS $OS"
- esac
-
- if [ -z "$ARCH" ]; then
- ARCH=$(uname -m)
- fi
- case $ARCH in
- amd64)
- ARCH=amd64
- ;;
- x86_64)
- ARCH=amd64
- ;;
- arm64)
- ARCH=arm64
- ;;
- aarch64)
- ARCH=arm64
- ;;
- arm*)
- ARCH=arm
- ;;
- *)
- fatal "Unsupported architecture $ARCH"
- esac
-
- SUFFIX=-${OS}-${ARCH}
-}
-
-# --- fatal if no curl ---
-verify_curl() {
- if [ -z "$(command -v curl)" ]; then
- fatal "Can not find curl for downloading files"
- fi
-}
-
-# --- create tempory directory and cleanup when done ---
-setup_tmp() {
- TMP_DIR=$(mktemp -d -t fleet-install.XXXXXXXXXX)
- TMP_HASH=${TMP_DIR}/fleet.hash
- TMP_BIN=${TMP_DIR}/fleet.bin
- cleanup() {
- code=$?
- set +e
- trap - EXIT
- rm -rf "${TMP_DIR}"
- exit $code
- }
- trap cleanup INT EXIT
-}
-
-# --- use desired fleet version if defined or find latest ---
-get_release_version() {
- if [ -n "${INSTALL_FLEET_VERSION}" ]; then
- VERSION_FLEET="${INSTALL_FLEET_VERSION}"
- else
- info "Finding latest release"
- VERSION_FLEET=$(curl -w "%{url_effective}" -I -L -s -S ${GITHUB_URL}/latest -o /dev/null | sed -e 's|.*/||')
- fi
- info "Using ${VERSION_FLEET} as release"
-}
-
-# --- download hash from github url ---
-download_hash() {
- HASH_URL=${GITHUB_URL}/download/${VERSION_FLEET}/sha256sum-${ARCH}.txt
- info "Downloading hash ${HASH_URL}"
- curl -o "${TMP_HASH}" -sfL "${HASH_URL}" || fatal "Hash download failed"
- HASH_EXPECTED=$(grep " fleet${SUFFIX}$" "${TMP_HASH}" | awk '{print $1}')
-}
-
-# --- check hash against installed version ---
-installed_hash_matches() {
- if [ -x ${BIN_DIR}/fleet ]; then
- HASH_INSTALLED=$($SHA ${BIN_DIR}/fleet | awk '{print $1}')
- if [ "${HASH_EXPECTED}" = "${HASH_INSTALLED}" ]; then
- return
- fi
- fi
- return 1
-}
-
-# --- download binary from github url ---
-download_binary() {
- BIN_URL=${GITHUB_URL}/download/${VERSION_FLEET}/fleet${SUFFIX}
- info "Downloading binary ${BIN_URL}"
- curl -o "${TMP_BIN}" -fL "${BIN_URL}" || fatal "Binary download failed"
-}
-
-# --- verify downloaded binary hash ---
-verify_binary() {
- info "Verifying binary download"
- HASH_BIN=$($SHA "${TMP_BIN}" | awk '{print $1}')
- if [ "${HASH_EXPECTED}" != "${HASH_BIN}" ]; then
- fatal "Download sha256 does not match ${HASH_EXPECTED}, got ${HASH_BIN}"
- fi
-}
-
-# --- setup permissions and move binary to system directory ---
-setup_binary() {
- chmod 755 "${TMP_BIN}"
- info "Installing fleet to ${BIN_DIR}/fleet"
- [ -n "$SUDO" ] && { $SUDO chown 0:0 "${TMP_BIN}"; }
- $SUDO mv -f "${TMP_BIN}" ${BIN_DIR}/fleet
-
- if command -v getenforce > /dev/null 2>&1; then
- if [ "Disabled" != "$(getenforce)" ]; then
- info "SeLinux is enabled, setting permissions"
- if ! $SUDO semanage fcontext -l | grep "${BIN_DIR}/fleet" > /dev/null 2>&1; then
- $SUDO semanage fcontext -a -t bin_t "${BIN_DIR}/fleet"
- fi
- $SUDO restorecon -v ${BIN_DIR}/fleet > /dev/null
- fi
- fi
-}
-
-# --- download and verify fleet ---
-download_and_verify() {
- setup_verify_arch
- verify_curl
- setup_tmp
- get_release_version
- download_hash
-
- if installed_hash_matches; then
- info "Skipping binary downloaded, installed fleet matches hash"
- return
- fi
-
- download_binary
- verify_binary
- setup_binary
-}
-
-# --- create uninstall script ---
-create_uninstall() {
- [ "${INSTALL_FLEET_BIN_DIR_READ_ONLY}" = "true" ] && return
- info "Creating uninstall script ${BIN_DIR}/${UNINSTALL_FLEET_SH}"
- $SUDO tee ${BIN_DIR}/${UNINSTALL_FLEET_SH} >/dev/null << EOF
-#!/bin/sh
-set -x
-[ \`id -u\` = 0 ] || exec sudo \$0 \$@
-
-remove_uninstall() {
- rm -f ${BIN_DIR}/${UNINSTALL_FLEET_SH}
-}
-trap remove_uninstall EXIT
-EOF
- $SUDO chmod 755 ${BIN_DIR}/${UNINSTALL_FLEET_SH}
- [ -n "$SUDO" ] && { $SUDO ${BIN_DIR}/${UNINSTALL_FLEET_SH}; }
-}
-
-# --- get hashes of the current fleet bin and service files
-get_installed_hashes() {
- $SUDO "$SHA" ${BIN_DIR}/fleet 2>&1 || true
-}
-
-# --- run the install process --
-{
- setup_env
- download_and_verify
- #create_uninstall
-}
diff --git a/mkdocs.yml b/mkdocs.yml
new file mode 100644
index 0000000000..044c4bc89c
--- /dev/null
+++ b/mkdocs.yml
@@ -0,0 +1,52 @@
+site_name: Fleet - GitOps at Scale
+repo_url: https://github.com/rancher/fleet
+strict: true
+theme:
+ name: material
+ palette:
+ primary: green
+ font:
+ text: 'Work Sans'
+ logo: 'assets/logo.svg'
+ #favicon: 'assets/favicon-32x32.png'
+#google_analytics:
+plugins:
+ - search
+ - markdownextradata: {}
+markdown_extensions:
+- codehilite
+- admonition
+- toc:
+ permalink: true
+nav:
+ - index.md
+ - quickstart.md
+ - concepts.md
+ - architecture.md
+ - examples.md
+ - Operator Guide:
+ - Installation:
+ - Overview: installation.md
+ - single-cluster-install.md
+ - multi-cluster-install.md
+ - uninstall.md
+ - namespaces.md
+ - Managing Clusters:
+ - Registration:
+ - cluster-overview.md
+ - cluster-tokens.md
+ - agent-initiated.md
+ - manager-initiated.md
+ - cluster-group.md
+ - User Guide:
+ - Managing Git Repos:
+ - gitrepo-add.md
+ - gitrepo-rm.md
+ - gitrepo-structure.md
+ - gitrepo-targets.md
+ - bundles.md
+extra:
+ fleet:
+ apiVersion: fleet.cattle.io/v1alpha1
+ version: v0.3.0-alpha5
+ helmversion: 0.3.0-alpha5
diff --git a/pkg/kustomize/kustomize.go b/pkg/kustomize/kustomize.go
index bc59cd0f55..9527ced52c 100644
--- a/pkg/kustomize/kustomize.go
+++ b/pkg/kustomize/kustomize.go
@@ -1,7 +1,6 @@
package kustomize
import (
- "encoding/json"
"path/filepath"
"strings"
@@ -15,7 +14,7 @@ import (
"sigs.k8s.io/kustomize/api/konfig"
"sigs.k8s.io/kustomize/api/krusty"
"sigs.k8s.io/kustomize/api/types"
- "sigs.k8s.io/kustomize/kyaml/yaml"
+ "sigs.k8s.io/yaml"
)
const (
@@ -66,7 +65,7 @@ func modifyKustomize(f filesys.FileSystem, dir string) error {
}
data["resources"] = append(resources, ManifestsYAML)
- fileBytes, err = json.Marshal(data)
+ fileBytes, err = yaml.Marshal(data)
if err != nil {
return err
}