
Commit

[ENH] ✨ implement s3instanceref and default and add allowedNamespaces
Eneman Donatien authored and Eneman Donatien committed Sep 19, 2024
1 parent 34b85b3 commit bc9818a
Showing 26 changed files with 445 additions and 216 deletions.
10 changes: 9 additions & 1 deletion README.md
@@ -92,7 +92,7 @@ The parameters are summarized in the table below :
| `path-deletion` | false | - | no | Trigger path deletion on the S3 backend upon CR deletion. Limited to deleting the `.keep` files used by the operator. |
| `s3User-deletion` | false | - | no | Trigger S3User deletion on the S3 backend upon CR deletion. |
| `override-existing-secret` | false | - | no | Update secret linked to s3User if already exist, else noop |
| `s3LabelSelector` | "" | - | no | Filter resource that this instance will manage. If Empty all resource in the cluster will be manage |
| `allowedNamespaces` | "" | - | no | Namespaces allowed to use the default s3Instance |
## Minimal rights needed to work

The Operator needs at least these rights:
@@ -170,6 +170,7 @@ spec:
secretName: minio-credentials # Name of the secret containing 2 Keys S3_ACCESS_KEY and S3_SECRET_KEY
region: us-east-1 # Region of the Provider
useSSL: true # useSSL to query the Provider
allowedNamespaces: [] # Namespaces allowed to use this s3Instance; entries may be regular expressions. If empty, only the namespace of the s3Instance itself is allowed
```
### Bucket example
@@ -307,6 +308,13 @@ spec:

Each S3User is linked to a Kubernetes secret with the same name as the S3User. The secret contains 2 keys: `accessKey` and `secretKey`.

### :info: How s3InstanceRef works

S3InstanceRef can take the following values (the resolution is sketched in the snippet below):
- empty: the default s3Instance configured at operator startup is used, provided the resource's namespace is among the namespaces allowed for that s3Instance
- `s3InstanceName`: the s3Instance named `s3InstanceName` in the resource's own namespace is used (if that namespace is allowed)
- `namespace/s3InstanceName`: the s3Instance named `s3InstanceName` in the namespace `namespace` is used (if the resource's namespace is allowed to use that s3Instance)
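
How a reference is resolved to a cached S3 client can be summarized with the short Go sketch below, which mirrors the controller changes further down in this commit. It is illustrative only: the helper name is hypothetical, and the real controllers inline this logic and additionally check the instance's `allowedNamespaces` before returning a client.

```go
package main

import (
	"fmt"
	"strings"
)

// resolveS3InstanceKey is a hypothetical helper showing how an s3InstanceRef
// is turned into the key used to look up the client in the operator's cache.
func resolveS3InstanceKey(ref, resourceNamespace string) string {
	if ref == "" {
		// No reference: fall back to the default instance configured at startup.
		return "default"
	}
	if strings.Contains(ref, "/") {
		// Already fully qualified: "namespace/s3InstanceName".
		return ref
	}
	// Bare name: the instance is looked up in the resource's own namespace.
	return resourceNamespace + "/" + ref
}

func main() {
	fmt.Println(resolveS3InstanceKey("", "team-a"))            // default
	fmt.Println(resolveS3InstanceKey("minio", "team-a"))       // team-a/minio
	fmt.Println(resolveS3InstanceKey("infra/minio", "team-a")) // infra/minio
}
```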

## Operator SDK generated guidelines

<details>
1 change: 1 addition & 0 deletions api/v1alpha1/bucket_types.go
@@ -38,6 +38,7 @@ type BucketSpec struct {

// s3InstanceRef where create the bucket
// +kubebuilder:validation:Optional
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="S3InstanceRef is immutable"
S3InstanceRef string `json:"s3InstanceRef,omitempty"`

// Quota to apply to the bucket
1 change: 1 addition & 0 deletions api/v1alpha1/path_types.go
@@ -38,6 +38,7 @@ type PathSpec struct {

// s3InstanceRef where create the Paths
// +kubebuilder:validation:Optional
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="S3InstanceRef is immutable"
S3InstanceRef string `json:"s3InstanceRef,omitempty"`
}

1 change: 1 addition & 0 deletions api/v1alpha1/policy_types.go
@@ -38,6 +38,7 @@ type PolicySpec struct {

// s3InstanceRef where create the Policy
// +kubebuilder:validation:Optional
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="S3InstanceRef is immutable"
S3InstanceRef string `json:"s3InstanceRef,omitempty"`
}

8 changes: 6 additions & 2 deletions api/v1alpha1/s3instance_types.go
@@ -46,9 +46,13 @@ type S3InstanceSpec struct {
// +kubebuilder:validation:Optional
UseSSL bool `json:"useSSL,omitempty"`

// CaCertificatesBase64 associated to the S3InstanceUrl
// Secret containing key ca.crt with the certificate associated to the S3InstanceUrl
// +kubebuilder:validation:Optional
CaCertificatesBase64 []string `json:"caCertificateBase64,omitempty"`
CaCertSecretRef string `json:"caCertSecretRef,omitempty"`

// AllowedNamespaces that can use this S3Instance; if empty, only the namespace of this S3Instance is allowed to use it
// +kubebuilder:validation:Optional
AllowedNamespaces []string `json:"allowedNamespaces,omitempty"`
}

// S3InstanceStatus defines the observed state of S3Instance
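The controllers below gate every client lookup through `utils.IsAllowedNamespaces`, which is not among the files rendered in this excerpt. As a rough sketch of the documented behaviour (entries may be regular expressions; an empty list restricts use to the instance's own namespace), it could look like the following; the helper's name, signature and exact semantics here are assumptions.

```go
package utils

import "regexp"

// isAllowedNamespace is a hypothetical sketch of the namespace allow-list
// check; the actual utils.IsAllowedNamespaces in the repository may differ.
func isAllowedNamespace(resourceNamespace, instanceNamespace string, allowedNamespaces []string) bool {
	if len(allowedNamespaces) == 0 {
		// Empty list: only the namespace of the S3Instance itself may use it.
		return resourceNamespace == instanceNamespace
	}
	for _, pattern := range allowedNamespaces {
		// Entries are treated as regular expressions, as in the README example.
		if matched, err := regexp.MatchString(pattern, resourceNamespace); err == nil && matched {
			return true
		}
	}
	return false
}
```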
1 change: 1 addition & 0 deletions api/v1alpha1/s3user_types.go
@@ -40,6 +40,7 @@ type S3UserSpec struct {

// s3InstanceRef where create the user
// +kubebuilder:validation:Optional
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="S3InstanceRef is immutable"
S3InstanceRef string `json:"s3InstanceRef,omitempty"`
}

4 changes: 2 additions & 2 deletions api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

3 changes: 3 additions & 0 deletions config/crd/bases/s3.onyxia.sh_buckets.yaml
@@ -60,6 +60,9 @@ spec:
s3InstanceRef:
description: s3InstanceRef where create the bucket
type: string
x-kubernetes-validations:
- message: S3InstanceRef is immutable
rule: self == oldSelf
required:
- name
- quota
3 changes: 3 additions & 0 deletions config/crd/bases/s3.onyxia.sh_paths.yaml
@@ -46,6 +46,9 @@ spec:
s3InstanceRef:
description: s3InstanceRef where create the Paths
type: string
x-kubernetes-validations:
- message: S3InstanceRef is immutable
rule: self == oldSelf
required:
- bucketName
type: object
3 changes: 3 additions & 0 deletions config/crd/bases/s3.onyxia.sh_policies.yaml
@@ -44,6 +44,9 @@ spec:
s3InstanceRef:
description: s3InstanceRef where create the Policy
type: string
x-kubernetes-validations:
- message: S3InstanceRef is immutable
rule: self == oldSelf
required:
- name
- policyContent
9 changes: 7 additions & 2 deletions config/crd/bases/s3.onyxia.sh_s3instances.yaml
@@ -35,11 +35,16 @@ spec:
spec:
description: S3InstanceSpec defines the desired state of S3Instance
properties:
caCertificateBase64:
description: CaCertificatesBase64 associated to the S3InstanceUrl
allowedNamespaces:
description: AllowedNamespaces that can use this S3Instance; if empty,
only the namespace of this S3Instance is allowed to use it
items:
type: string
type: array
caCertSecretRef:
description: Secret containing key ca.crt with the certificate associated
to the S3InstanceUrl
type: string
region:
description: region associated to the S3Instance
type: string
3 changes: 3 additions & 0 deletions config/crd/bases/s3.onyxia.sh_s3users.yaml
@@ -46,6 +46,9 @@ spec:
s3InstanceRef:
description: s3InstanceRef where create the user
type: string
x-kubernetes-validations:
- message: S3InstanceRef is immutable
rule: self == oldSelf
secretName:
description: SecretName associated to the S3User
type: string
63 changes: 37 additions & 26 deletions controllers/bucket_controller.go
@@ -19,6 +19,7 @@ package controllers
import (
"context"
"fmt"
"strings"
"time"

s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1"
@@ -74,19 +75,6 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
return ctrl.Result{}, err
}

// check if this object must be manage by this instance
if r.S3LabelSelectorValue != "" {
labelSelectorValue, found := bucketResource.Labels[utils.S3OperatorBucketLabelSelectorKey]
if !found {
logger.Info("This bucket ressouce will not be manage by this instance because this instance require that Bucket get labelSelector and label selector not found", "req.Name", req.Name, "Bucket Labels", bucketResource.Labels, "S3OperatorBucketLabelSelectorKey", utils.S3OperatorBucketLabelSelectorKey)
return ctrl.Result{}, nil
}
if labelSelectorValue != r.S3LabelSelectorValue {
logger.Info("This bucket ressouce will not be manage by this instance because this instance require that Bucket get specific a specific labelSelector value", "req.Name", req.Name, "expected", r.S3LabelSelectorValue, "current", labelSelectorValue)
return ctrl.Result{}, nil
}
}

// Managing bucket deletion with a finalizer
// REF : https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/#external-resources
isMarkedForDeletion := bucketResource.GetDeletionTimestamp() != nil
@@ -131,9 +119,15 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
// Create S3Client
s3Client, err := r.getS3InstanceForObject(ctx, bucketResource)
if err != nil {
logger.Error(err, "an error occurred while getting s3Client")
return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "FailedS3Client",
"Getting s3Client in cache has failed", err)
if customErr, ok := err.(*s3ClientCache.S3ClientNotFound); ok {
logger.Error(err, "an error occurred while getting s3Client")
return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "FailedS3Client",
customErr.Reason, err)
} else {
logger.Error(err, "an error occurred while getting s3Client")
return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "FailedS3Client",
"Unknown error occured while getting bucket", err)
}
}

// Bucket lifecycle management (other than deletion) starts here
@@ -298,23 +292,40 @@ func (r *BucketReconciler) SetBucketStatusConditionAndUpdate(ctx context.Context
func (r *BucketReconciler) getS3InstanceForObject(ctx context.Context, bucketResource *s3v1alpha1.Bucket) (factory.S3Client, error) {
logger := log.FromContext(ctx)
if bucketResource.Spec.S3InstanceRef == "" {
logger.Info("Bucket resource doesn't have S3InstanceRef fill, failback to default instance")
logger.Info("Bucket resource doesn't refer to s3Instance, failback to default one")
s3Client, found := r.S3ClientCache.Get("default")
if !found {
err := &s3ClientCache.S3ClientCacheError{Reason: "No default client was found"}
logger.Error(err, "No default client was found")
err := &s3ClientCache.S3ClientNotFound{Reason: "Client not found"}
logger.Error(err, "Client \"default\" was not found")
return nil, err
} else {
if utils.IsAllowedNamespaces(bucketResource.Namespace, s3Client.GetConfig().AllowedNamespaces) {
return s3Client, nil
} else {
err := &s3ClientCache.S3ClientNotFound{Reason: "Client \"default\" was not found"}
return nil, err
}
}
return s3Client, nil
} else {

logger.Info(fmt.Sprintf("Bucket resource refer to s3Instance: %s, search instance in cache", bucketResource.Spec.S3InstanceRef))
s3Client, found := r.S3ClientCache.Get(bucketResource.Spec.S3InstanceRef)
logger.Info(fmt.Sprintf("Bucket resource doesn't refer to s3Instance: %s, search instance in cache", bucketResource.Spec.S3InstanceRef))
clientName := ""
if strings.Contains(bucketResource.Spec.S3InstanceRef, "/") {
clientName = bucketResource.Spec.S3InstanceRef
} else {
clientName = bucketResource.Namespace + "/" + bucketResource.Spec.S3InstanceRef
}
s3Client, found := r.S3ClientCache.Get(clientName)
if !found {
err := &s3ClientCache.S3ClientCacheError{Reason: fmt.Sprintf("S3InstanceRef: %s, not found in cache", bucketResource.Spec.S3InstanceRef)}
logger.Error(err, "No client was found")
err := &s3ClientCache.S3ClientNotFound{Reason: fmt.Sprintf("S3InstanceRef: %s not found in cache", clientName)}
logger.Error(err, fmt.Sprintf("S3InstanceRef: %s not found in cache", clientName))
return nil, err
}
logger.Info(fmt.Sprintf("Check if BucketRessource %s can use S3Instance %s", bucketResource.Name, clientName))
if utils.IsAllowedNamespaces(bucketResource.Namespace, s3Client.GetConfig().AllowedNamespaces) {
return s3Client, nil
} else {
err := &s3ClientCache.S3ClientNotFound{Reason: fmt.Sprintf("Client %s is not allowed in this namespace", bucketResource.Spec.S3InstanceRef)}
return nil, err
}
return s3Client, nil
}
}
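Both controllers now surface the dedicated `s3ClientCache.S3ClientNotFound` error instead of the generic `S3ClientCacheError` on lookup failures. Its definition lives in one of the changed files not rendered above; judging only from how it is constructed and type-asserted here, a minimal compatible definition would be roughly the following sketch.

```go
package s3 // imported by the controllers above as s3ClientCache

// S3ClientNotFound is sketched from its usage in the controllers; the actual
// type in internal/s3 may carry additional fields or methods.
type S3ClientNotFound struct {
	Reason string
}

func (e *S3ClientNotFound) Error() string {
	return e.Reason
}
```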
63 changes: 37 additions & 26 deletions controllers/path_controller.go
@@ -19,6 +19,7 @@ package controllers
import (
"context"
"fmt"
"strings"
"time"

s3ClientCache "github.com/InseeFrLab/s3-operator/internal/s3"
@@ -74,19 +75,6 @@ func (r *PathReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.
return ctrl.Result{}, err
}

// check if this object must be manage by this instance
if r.S3LabelSelectorValue != "" {
labelSelectorValue, found := pathResource.Labels[utils.S3OperatorPathLabelSelectorKey]
if !found {
logger.Info("This paht ressouce will not be manage by this instance because this instance require that path get labelSelector and label selector not found", "req.Name", req.Name, "Bucket Labels", pathResource.Labels, "S3OperatorBucketLabelSelectorKey", utils.S3OperatorBucketLabelSelectorKey)
return ctrl.Result{}, nil
}
if labelSelectorValue != r.S3LabelSelectorValue {
logger.Info("This path ressouce will not be manage by this instance because this instance require that path get specific a specific labelSelector value", "req.Name", req.Name, "expected", r.S3LabelSelectorValue, "current", labelSelectorValue)
return ctrl.Result{}, nil
}
}

// Managing path deletion with a finalizer
// REF : https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/#external-resources
isMarkedForDeletion := pathResource.GetDeletionTimestamp() != nil
@@ -132,9 +120,15 @@ func (r *PathReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.
// Create S3Client
s3Client, err := r.getS3InstanceForObject(ctx, pathResource)
if err != nil {
logger.Error(err, "an error occurred while getting s3Client")
return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "FailedS3Client",
"Getting s3Client in cache has failed", err)
if customErr, ok := err.(*s3ClientCache.S3ClientNotFound); ok {
logger.Error(err, "an error occurred while getting s3Client")
return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "FailedS3Client",
customErr.Reason, err)
} else {
logger.Error(err, "an error occurred while getting s3Client")
return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "FailedS3Client",
"Unknown error occured while getting bucket", err)
}
}

// Path lifecycle management (other than deletion) starts here
@@ -264,23 +258,40 @@ func (r *PathReconciler) SetPathStatusConditionAndUpdate(ctx context.Context, pa
func (r *PathReconciler) getS3InstanceForObject(ctx context.Context, pathResource *s3v1alpha1.Path) (factory.S3Client, error) {
logger := log.FromContext(ctx)
if pathResource.Spec.S3InstanceRef == "" {
logger.Info("Bucket resource doesn't refer to s3Instance, failback to default one")
logger.Info("Path resource doesn't refer to s3Instance, failback to default one")
s3Client, found := r.S3ClientCache.Get("default")
if !found {
err := &s3ClientCache.S3ClientCacheError{Reason: "No default client was found"}
logger.Error(err, "No default client was found")
err := &s3ClientCache.S3ClientNotFound{Reason: "Client not found"}
logger.Error(err, "Client \"default\" was not found")
return nil, err
} else {
if utils.IsAllowedNamespaces(pathResource.Namespace, s3Client.GetConfig().AllowedNamespaces) {
return s3Client, nil
} else {
err := &s3ClientCache.S3ClientNotFound{Reason: "Client \"default\" was not found"}
return nil, err
}
}
return s3Client, nil
} else {

logger.Info(fmt.Sprintf("Bucket resource doesn't refer to s3Instance: %s, search instance in cache", pathResource.Spec.S3InstanceRef))
s3Client, found := r.S3ClientCache.Get(pathResource.Spec.S3InstanceRef)
logger.Info(fmt.Sprintf("Path resource doesn't refer to s3Instance: %s, search instance in cache", pathResource.Spec.S3InstanceRef))
clientName := ""
if strings.Contains(pathResource.Spec.S3InstanceRef, "/") {
clientName = pathResource.Spec.S3InstanceRef
} else {
clientName = pathResource.Namespace + "/" + pathResource.Spec.S3InstanceRef
}
s3Client, found := r.S3ClientCache.Get(clientName)
if !found {
err := &s3ClientCache.S3ClientCacheError{Reason: fmt.Sprintf("S3InstanceRef: %s,not found in cache", pathResource.Spec.S3InstanceRef)}
logger.Error(err, "No client was found")
err := &s3ClientCache.S3ClientNotFound{Reason: fmt.Sprintf("S3InstanceRef: %s not found in cache", clientName)}
logger.Error(err, fmt.Sprintf("S3InstanceRef: %s not found in cache", clientName))
return nil, err
}
logger.Info(fmt.Sprintf("Check if PathRessource %s can use S3Instance %s", pathResource.Name, clientName))
if utils.IsAllowedNamespaces(pathResource.Namespace, s3Client.GetConfig().AllowedNamespaces) {
return s3Client, nil
} else {
err := &s3ClientCache.S3ClientNotFound{Reason: fmt.Sprintf("Client %s is not allowed in this namespace", pathResource.Spec.S3InstanceRef)}
return nil, err
}
return s3Client, nil
}
}
